#define F_CPU 16000000UL // AVRJazz28PIN Board Used 16MHz
#include <avr/io.h>
#include <util/delay.h>
#include <avr/interrupt.h>
#define SPI_PORT PORTB
#define SPI_DDR DDRB
#define SPI_CS PB2
void SPI_Write(uint8_t addr, uint8_t dataout)
{
// Enable CS Pin
SPI_PORT &= ~(1<<SPI_CS);
// Start Address transmission (MOSI)
SPDR = addr;
// Wait for transmission complete
while(!(SPSR & (1<<SPIF)));
// Start Data transmission (MOSI)
SPDR = dataout;
// Wait for transmission complete
while(!(SPSR & (1<<SPIF)));
// Disable CS Pin
SPI_PORT |= (1<<SPI_CS);
}
char digit[10] = {0,0,0,0,0,0,0,0,0,0};
digit[0] = 0x7E
digit[1] = 0x30
digit[2] = 0x6D
digit[3] = 0x79
digit[4] = 0x33
digit[5] = 0x5B
digit[6] = 0x5F
digit[7] = 0x70
digit[8] = 0x7F
digit[9] = 0x7B
void main()
{
char ch;
char digits_disp[10] = {0,0,0,0,0,0,0,0,0,0};
ch = digits_disp[3];
}
this is a very basic code. I am designing a clock using the MAX 7219 display driver. Before i go into the detail of it, I wanted to get a basic code working where I will initialise the SPI and then declare the value of each character using the datasheet of MAX7219 and then just write a short array to display random numbers. But this code is not working propoerly and keeps saying:
../exp3.c:45: error: conflicting types for 'digits_disp'
../exp3.c:44: error: previous definition of 'digits_disp' was here
Can you please help me on what I am doing wrong and could you tell me how I can initialize my array so that I can display the character '3' on my simulation? In other words, what line of code will i need to add in order to display the characters in my array?
Thank you.
I can't really make sense of your code, but this part of the code is syntactically invalid:
char digit[10] = {0,0,0,0,0,0,0,0,0,0};
digit[0] = 0x7E
digit[1] = 0x30
digit[2] = 0x6D
digit[3] = 0x79
digit[4] = 0x33
digit[5] = 0x5B
digit[6] = 0x5F
digit[7] = 0x70
digit[8] = 0x7F
digit[9] = 0x7B
You probably want to do this instead:
char digit[10] = { 0x7E, 0x30, 0x6D, 0x79, 0x33, 0x5B, 0x5F, 0x70, 0x7F, 0x7B };
From the comment in the 1st line I gather you are using one of the Atmel chips. If so, here is the article that should help you:
http://www.adnbr.co.uk/articles/max7219-and-7-segment-displays
You can use their code as the starting point:
https://gist.github.com/adnbr/2352797
Here is also Arduino version in case anyone else is interested:
https://gist.github.com/nrdobie/8193350
Related
I have written a program for my At-mega 32 in order to increase and decrease numbers from 0 to 9 and reverse (9 to 0) with the two tactile switch witch you can see in the picture.
But the problem is that while all of the numbers are demonstrated by pressing those switches, only numbers 4 and 7 don't appear properly !?.
My seven segments instead of number 4 demonstrates what you can see in the first picture and for number 7 is shows what you can see in the second picture.
I would appreciate it if someone could have a look at my codes and schematic which I have shared here and help me to find the issue.
Thank you so much,
#include <avr/io.h>
#include <util/delay.h>
#include "global.h"
#include <avr/interrupt.h>
uint8_t Codes[] = {0xFC, 0x60, 0xDA, 0xF2, 0x66, 0xB6, 0xBE, 0xE0, 0xFE, 0xF6};
uint8_t count=0;
//temp=0
void display(uint8_t digit);
void config(void);
ISR(INT0_vect)
{
if(count<9)
{
count++;
display(count);
}
}
ISR(INT1_vect)
{
if(count>0)
{
count--;
display(count);
}
}
int main(void)
{
config();
while(1)
{
}
}
void config(void)
{
DDRA=0xFF;
PORTA=0;
cbi(DDRD,2);
cbi(DDRD,3);
sbi(PORTD,2);
sbi(PORTD,3);
GICR=(1<<INT1) | (1<<INT0);
MCUCR=0; //low level sensitivity
GIFR=(1 << INTF1) | (INTF0);
sei();
}
void display(uint8_t digit)
{
PORTA=Codes[digit];}
Your code looks correct, I even checked the patterns of Codes[].
Each bit of the patterns is assigned to one specific segment of the display:
Port bit
Segment
PA7
a
PA6
b
PA5
c
PA4
d
PA3
e
PA2
f
PA1
g
Your wiring is wrong, unfortunately the schematic does not show the pin names of the display. It seems as if it's upside down.
I was wondering was 0xff, 0x00, and 0x0f represent. TRISA, TRISB, and TRISC are the ports being used on my board.
void main()
{
TRISA = 0xff;
TRISB = 0x00;
TRISC = 0x00;
ADCON1 = 0x0f;
}
TRISA is the tristate controller bits for I/O line A. This turns on or off the tristate gates that select whether the output register powers the pins or not. With the tristates off, the pins are input pins.
I'm not absolutely sure since I haven't checked the manual in over 10 years but I think 0xFF turns on all the tristates, so all the pins are input pins. I could have it backwards though.
When communicating with the ADXL355 accelerometer from a Raspberry Pi, i2c works perfectly. However, when I try using SPI the register addresses don't match those in the manual and I need to understand why.
For example, the following code prints the results back from sending 0x00, 0x01, 0x02 and 0x03 which according to the manual (and what I experience when I use i2c) should correspond to DEVID_AD, DEVID_MST, PARTID, and REVID.
#include <bcm2835.h>
#include <stdio.h>
void transfer(char charToTransfer)
{
char buffer[] = { charToTransfer, 0x00};
bcm2835_spi_transfern(buffer, sizeof(buffer));
printf("Rx: %02X \n", buffer[1]);
}
int main(int argc, char **argv)
{
if (!bcm2835_init())
return 1;
bcm2835_spi_begin();
bcm2835_spi_setBitOrder(BCM2835_SPI_BIT_ORDER_MSBFIRST);
bcm2835_spi_setDataMode(BCM2835_SPI_MODE0);
bcm2835_spi_setClockDivider(BCM2835_SPI_CLOCK_DIVIDER_256);
bcm2835_spi_chipSelect(BCM2835_SPI_CS0);
bcm2835_spi_setChipSelectPolarity(BCM2835_SPI_CS0, LOW);
transfer(0x00);
transfer(0x01);
transfer(0x02);
transfer(0x03);
bcm2835_spi_end();
return 0;
}
The output from this is
Rx: 00
Rx: AD
Rx: 00
Rx: 1D
whereas it should be
Rx: AD
Rx: 1D
Rx: ED
Rx: 01
Any help would be greatly appreciated as at the moment I can't get data from the accelerometer until I understand why this simple communication isn't working.
I think the issue lies with the fact that bit 0 tells the device whether it's read or write. I'm not familiar with the notation of the bar above the W (feel free to correct me), but from my results I guess it has to be a 1 for read, which is why 0x00 became 0x01, 0x01 became 0x03, 0x02 became 0x05 etc as the register address needs to be shifted up 1 bit to allow for the RW bit.
ADXL355 SPI Protocol
In case it's helpful to anyone else, by changing the transfer function as shown below the registers now match up:
void transfer(char charToTransfer, int read)
{
char buffer[] = { (charToTransfer << 1) + read , 0x00};
bcm2835_spi_transfern(buffer, sizeof(buffer));
if(read)
{
printf("Rx: %02X \n", buffer[1]);
}
}
I am working on a project that requires using the GT-511C3 fingerprint scanner with a STM32F407G microcontroller board. I am using Keil uvision 5 and can't figure out how to initialize and turn on the scanner. I've written this code and it seems correct, based off the datasheet, for starting up the scanner and sending the start commands. I've used an oscilloscope to verify that the values are being sent correctly and it seems they are but the scanner is not lighting up. I'm completely lost on what I could be doing wrong or what I could be missing. I've verified the scanner is wired correctly and connected to the board accordingly. Any help would be gr8 :)
Scanner: https://www.digikey.com/catalog/en/partgroup/fingerprint-scanner-ttl-gt-511c3/56722
Current Code:
#include "stm32f4xx.h" // Device header
int i=0;
int z=0;
int main(){
// ENABLE CLocks for PORT C and UART4
RCC -> AHB1ENR = 0x00000004; //Enable the clock to GPIO port GPIOCEN
RCC -> APB1ENR = 0x00080000; //Enable the clock to UART 4 UART4EN
// Set Mode Pins on Port C pins 10 and 11 to Alternate Function
GPIOC->MODER|=0x00A00000;
// Set Alternate Function Register for Port C 10 and 11
GPIOC->AFR[1]|= 0x00008800; //AFR High = enabled
// UART setup:
// 8 data bits (default), no parity (default), one stop bit (default) & no flow control (default)
// UART Baud Rate
UART4->BRR = 0x683;
// Enable TRANSMITTER (TE bit)
UART4->CR1 |= (1<<3);
UART4->CR1 |= (1<<2);
// WE WANT TO TRANSMIT SO ENABLE TRANSMIT INTERRUPT
UART4->CR1 |=(1<<7);
// Enable UART (UE bit in CR1 register)
UART4->CR1 |=(1<<13);
// NVIC to handle interrupts
__enable_irq();
NVIC_ClearPendingIRQ(UART4_IRQn);
NVIC_SetPriority(UART4_IRQn,0);
NVIC_EnableIRQ(UART4_IRQn);
while(1){};
}
// Interrupt reoutine for UART1
void UART4_IRQHandler(void){
// TX IRQ
if(((UART4->SR)&(1<<7))>0){ //if txe flag in sr is on then
if(z==0){
int command[]={0x55,0xAA, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x01};
UART4->DR=command[i]; // command start code
i=i+1;
if(i==12){
z=1;
i=0;
}
}
if(z==1){
int command1[]={0x55,0xAA, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x12, 0x00, 0x13, 0x01};
UART4->DR=command1[i]; // command start code
i=i+1;
if(i==12){
z=0;
i=0;
}
}
}
}
First, I'm a student still. So I am not very experienced.
I'm working with a piece of bluetooth hardware and I am using its protocol to send it commands. The protocol requires packets to be sent with LSB first for each packet field.
I was getting error packets back to me indicating my CRC values were wrong so I did some investigating. I found the problem, but I became confused in the process.
Here is Some GDB output and other information elucidating my confusion.
I'm sending a packet that should look like this:
|Start Flag| Packet Num | Command | Payload | CRC | End Flag|
0xfc 0x1 0x0 0x8 0x0 0x5 0x59 0x42 0xfd
Here is some GDB output:
print /x reqId_ep
$1 = {start_flag = 0xfc, data = {packet_num = 0x1, command = {0x0, 0x8}, payload = {
0x0, 0x5}}, crc = 0x5942, end_flag = 0xfd}
reqId_ep is the variable name of the packet I'm sending. It looks all good there, but I am receiving the CRC error codes from it so something must be wrong.
Here I examine 9 bytes in hex starting from the address of my packet to send:
x/9bx 0x7fffffffdee0
0xfc 0x01 0x00 0x08 0x00 0x05 0x42 0x59 0xfd
And here the problem becomes apparent. The CRC is not LSB first. (0x42 0x59)
To fix my problem I removed the htons() that I set my CRC value equal with.
And here is the same output above without htons():
p/x reqId_ep
$1 = {start_flag = 0xfc, data = {packet_num = 0x1, command = {0x0, 0x8}, payload = {
0x0, 0x5}}, crc = 0x4259, end_flag = 0xfd}
Here the CRC value is not LSB.
But then:
x/9bx 0x7fffffffdee0
0xfc 0x01 0x00 0x08 0x00 0x05 0x59 0x42 0xfd
Here the CRC value is LSB first.
So apparently the storing of C is LSB first? Can someone please cast a light of knowledge upon me for this situation? Thank you kindly.
This has to do with Endianness in computing:
http://en.wikipedia.org/wiki/Endianness#Endianness_and_operating_systems_on_architectures
For example, the value 4660 (base-ten) is 0x1234 in hex. On a Big Endian system, it would be stored in memory as 1234 while on a Little Endian system it would be stored as 3412
If you want to avoid this sort of issue in the future, it might just be easiest to create a large array or struct of unsigned char, and store individual values in it.
eg:
|Start Flag| Packet Num | Command | Payload | CRC | End Flag|
0xfc 0x1 0x0 0x8 0x0 0x5 0x59 0x42 0xfd
typedef struct packet {
unsigned char startFlag;
unsigned char packetNum;
unsigned char commandMSB;
unsigned char commandLSB;
unsigned char payloadMSB;
unsigned char payloadLSB;
unsigned char crcMSB;
unsigned char crcLSB;
unsigned char endFlag;
} packet_t;
You could then create a function that you compile differently based on the type of system you are building for using preprocessor macros.
eg:
/* Uncomment the line below if you are using a little endian system;
/* otherwise, leave it commented
*/
//#define LITTLE_ENDIAN_SYSTEM
// Function protocol
void writeCommand(int cmd);
//Function definition
void writeCommand(int cmd, packet_t* pkt)
{
if(!pkt)
{
printf("Error, invalid pointer!");
return;
}
#if LITTLE_ENDIAN_SYSTEM
pkt->commandMSB = (cmd && 0xFF00) >> 8;
pkt->commandLSB = (cmd && 0x00FF);
# else // Big Endian system
pkt->commandMSB = (cmd && 0x00FF);
pkt->commandLSB = (cmd && 0xFF00) >> 8;
#endif
// Done
}
int main void()
{
packet_t myPacket = {0}; //Initialize so it is zeroed out
writeCommand(0x1234,&myPacket);
return 0;
}
One final note: avoid sending structs as a stream of data, send it's individual elements one-at-a-time instead! ie: don't assume that the struct is stored internally in this case like a giant array of unsigned characters. There are things that the compiler and system put in place like packing and allignment, and the struct could actually be larger than 9 x sizeof(unsigned char).
Good luck!
This is architecture dependent based on which processor you're targeting. There are what is known as "Big Endian" systems, which store the most significant byte of a word first, and "Little Endian" systems that store the least significant byte first. It looks like you're looking at a Little Endian system there.