mirror of
https://github.com/pbatard/rufus.git
synced 2025-05-18 00:54:27 -04:00
[net] Check for application update (part 2)
* factorized token parser and added update file parsing
This commit is contained in:
parent
bc01064678
commit
b3ed23647f
7 changed files with 202 additions and 61 deletions
235
src/parser.c
235
src/parser.c
|
@ -34,17 +34,80 @@
|
|||
#include "rufus.h"
|
||||
#include "msapi_utf8.h"
|
||||
|
||||
// Parse a file (ANSI or UTF-8 or UTF-16) and return the data for the first occurrence of 'token'
|
||||
// The parsed line is of the form: [ ]token[ ]=[ ]["]data["]
|
||||
// The returned string is UTF-8 and MUST be freed by the caller
|
||||
char* get_token_data(const char* filename, const char* token)
|
||||
// Update metadata parsed from a remote update file by parse_update().
// NOTE(review): all char* members are heap-allocated UTF-8 strings (or NULL if the
// token was missing) — see the "free all these strings" TODO in parse_update; confirm
// intended ownership before relying on it.
typedef struct {
	uint8_t version[4];     // up to 4 dot-separated numeric version fields
	char* type;             // "release", "beta", "notice"
	char* platform;         // target platform ("windows", "linux", etc.)
	char* platform_arch;    // "x86", "x64", "arm"
	char* platform_min;     // minimum platform version required
	char* download_url[2];  // up to 2 download URLs ("download_url" token, occurrences 1 and 2)
	char* release_notes;    // free-form release notes text
} rufus_update;
|
||||
|
||||
|
||||
// Parse a line of UTF-16 text and return the data if it matches the 'token'
|
||||
// The parsed line is of the form: [ ]token[ ]=[ ]["]data["][ ] and the line
|
||||
// is modified by the parser
|
||||
static wchar_t* get_token_data_line(const wchar_t* wtoken, wchar_t* wline)
|
||||
{
|
||||
wchar_t *wtoken = NULL, *wfilename = NULL;
|
||||
wchar_t wspace[] = L" \t";
|
||||
wchar_t weol[] = L"\r\n";
|
||||
const wchar_t wspace[] = L" \t"; // The only whitespaces we recognize as such
|
||||
const wchar_t weol[] = L"\r\n";
|
||||
size_t i, r;
|
||||
BOOLEAN quoteth;
|
||||
|
||||
if ((wtoken == NULL) || (wline == NULL) || (wline[0] == 0))
|
||||
return NULL;
|
||||
|
||||
// Eliminate trailing EOL characters
|
||||
wline[wcscspn(wline, weol)] = 0;
|
||||
|
||||
i = 0;
|
||||
|
||||
// Skip leading spaces
|
||||
i += wcsspn(&wline[i], wspace);
|
||||
|
||||
// Our token should begin a line
|
||||
if (_wcsnicmp(&wline[i], wtoken, wcslen(wtoken)) != 0)
|
||||
return NULL;
|
||||
|
||||
// Token was found, move past token
|
||||
i += wcslen(wtoken);
|
||||
|
||||
// Skip spaces
|
||||
i += wcsspn(&wline[i], wspace);
|
||||
|
||||
// Check for an equal sign
|
||||
if (wline[i] != L'=')
|
||||
return NULL;
|
||||
i++;
|
||||
|
||||
// Skip spaces after equal sign
|
||||
i += wcsspn(&wline[i], wspace);
|
||||
|
||||
// eliminate leading quote, if it exists
|
||||
if (wline[i] == L'"') {
|
||||
quoteth = TRUE;
|
||||
i++;
|
||||
}
|
||||
|
||||
// Keep the starting pos of our data
|
||||
r = i;
|
||||
|
||||
// locate end of string or quote
|
||||
while ( (wline[i] != 0) && ((wline[i] != L'"') || ((wline[i] == L'"') && (!quoteth))) )
|
||||
i++;
|
||||
wline[i] = 0;
|
||||
|
||||
return (wline[r] == 0)?NULL:&wline[r];
|
||||
}
|
||||
|
||||
// Parse a file (ANSI or UTF-8 or UTF-16) and return the data for the first occurrence of 'token'
|
||||
// The returned string is UTF-8 and MUST be freed by the caller
|
||||
char* get_token_data_file(const char* token, const char* filename)
|
||||
{
|
||||
wchar_t *wtoken = NULL, *wdata= NULL, *wfilename = NULL;
|
||||
wchar_t buf[1024];
|
||||
FILE* fd = NULL;
|
||||
size_t i, r;
|
||||
char *ret = NULL;
|
||||
|
||||
if ((filename == NULL) || (token == NULL))
|
||||
|
@ -68,46 +131,11 @@ char* get_token_data(const char* filename, const char* token)
|
|||
// Process individual lines. NUL is always appended.
|
||||
// Ideally, we'd check that our buffer fits the line
|
||||
while (fgetws(buf, ARRAYSIZE(buf), fd) != NULL) {
|
||||
|
||||
// Eliminate trailing EOL characters
|
||||
buf[wcscspn(buf, weol)] = 0;
|
||||
|
||||
i = 0;
|
||||
|
||||
// Skip leading spaces
|
||||
i += wcsspn(&buf[i], wspace);
|
||||
|
||||
// Our token should begin a line
|
||||
if (_wcsnicmp(&buf[i], wtoken, wcslen(wtoken)) != 0)
|
||||
continue;
|
||||
|
||||
// Token was found, move past token
|
||||
i += wcslen(wtoken);
|
||||
|
||||
// Skip spaces
|
||||
i += wcsspn(&buf[i], wspace);
|
||||
|
||||
// Check for an equal sign
|
||||
if (buf[i] != L'=')
|
||||
continue;
|
||||
i++;
|
||||
|
||||
// Skip spaces after equal sign
|
||||
i += wcsspn(&buf[i], wspace);
|
||||
|
||||
// eliminate leading quote, if it exists
|
||||
if (buf[i] == L'"')
|
||||
i++;
|
||||
|
||||
// Keep the starting pos of our data
|
||||
r = i;
|
||||
|
||||
// locate end of string or quote
|
||||
while ((buf[i] != 0) && (buf[i] != L'"'))
|
||||
i++;
|
||||
buf[i] = 0;
|
||||
ret = wchar_to_utf8(&buf[r]);
|
||||
break;
|
||||
wdata = get_token_data_line(wtoken, buf);
|
||||
if (wdata != NULL) {
|
||||
ret = wchar_to_utf8(wdata);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
|
@ -118,6 +146,115 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
// Parse a buffer (ANSI or UTF-8) and return the data for the 'n'th occurence of 'token'
|
||||
// The returned string is UTF-8 and MUST be freed by the caller
|
||||
char* get_token_data_buffer(const char* token, unsigned int n, const char* buffer, size_t buffer_size)
|
||||
{
|
||||
unsigned int j;
|
||||
wchar_t *wtoken = NULL, *wdata = NULL, *wbuffer = NULL, *wline = NULL;
|
||||
size_t i;
|
||||
BOOL done = FALSE;
|
||||
char* ret = NULL;
|
||||
|
||||
// We're handling remote data => better safe than sorry
|
||||
if ((token == NULL) || (buffer == NULL) || (buffer_size <= 4) || (buffer_size > 65536))
|
||||
goto out;
|
||||
|
||||
// Ensure that our buffer is NUL terminated
|
||||
if (buffer[buffer_size-1] != 0)
|
||||
goto out;
|
||||
|
||||
wbuffer = utf8_to_wchar(buffer);
|
||||
wtoken = utf8_to_wchar(token);
|
||||
if ((wbuffer == NULL) || (wtoken == NULL))
|
||||
goto out;
|
||||
|
||||
// Process individual lines
|
||||
for (i=0,j=0,done=FALSE; (j!=n)&&(!done); ) {
|
||||
wline = &wbuffer[i];
|
||||
|
||||
for(;(wbuffer[i]!=L'\n')&&(wbuffer[i]!=L'\r')&&(wbuffer[i]!=0);i++);
|
||||
if (wbuffer[i]==0) {
|
||||
done = TRUE;
|
||||
} else {
|
||||
wbuffer[i++] = 0;
|
||||
}
|
||||
wdata = get_token_data_line(wtoken, wline);
|
||||
if (wdata != NULL) {
|
||||
j++;
|
||||
}
|
||||
}
|
||||
out:
|
||||
if (wdata != NULL)
|
||||
ret = wchar_to_utf8(wdata);
|
||||
safe_free(wbuffer);
|
||||
safe_free(wtoken);
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Same as get_token_data_buffer(), but converts escaped "\n" sequences in the
// returned data to actual CRLF. The returned string MUST be freed by the caller.
static __inline char* get_sanitized_token_data_buffer(const char* token, unsigned int n, const char* buffer, size_t buffer_size)
{
	size_t i, len;
	char* data = get_token_data_buffer(token, n, buffer, buffer_size);
	if (data != NULL) {
		// Hoist the length out of the loop (was re-evaluated every iteration)
		len = safe_strlen(data);
		for (i=0; i<len; i++) {
			// NB: at i == len-1, data[i+1] is the NUL terminator, so this read is safe
			if ((data[i] == '\\') && (data[i+1] == 'n')) {
				data[i] = '\r';
				data[i+1] = '\n';
				i++;	// skip the '\n' we just wrote
			}
		}
	}
	return data;
}
|
||||
|
||||
// Parse an update data file and populates a rufus_update structure.
|
||||
// NB: since this is remote data, and we're running elevated, even if it comes from a
|
||||
// supposedly trusted server, it *IS* considered potentially malicious, so we treat
|
||||
// it as such
|
||||
void parse_update(char* buf)
|
||||
{
|
||||
size_t i, len = safe_strlen(buf);
|
||||
char *data = NULL, *token;
|
||||
char allowed_chars[] = " \t\r\nabcdefghijklmnopqrstuvwxyz"
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"$%^&*()-_+=<>(){}[].,:;#@'/?|~";
|
||||
rufus_update update;
|
||||
|
||||
// Sanitize the data - Of course not a silver bullet, but it helps
|
||||
for (i=0; i<len; i++) {
|
||||
// Do not sanitize \n yet
|
||||
// NB: we have a zero terminator, so we can afford a +1 without overflow
|
||||
if ((strchr(allowed_chars, buf[i]) == NULL) && (buf[i] != '\\') && (buf[i+1] != 'n')) {
|
||||
buf[i] = ' ';
|
||||
}
|
||||
}
|
||||
|
||||
if ((data = get_sanitized_token_data_buffer("version", 1, buf, len+1)) != NULL) {
|
||||
for (i=0; (i<4) && ((token = strtok((i==0)?data:NULL, ".")) != NULL); i++) {
|
||||
update.version[i] = (uint8_t)atoi(token);
|
||||
}
|
||||
safe_free(data);
|
||||
}
|
||||
// TODO: use X-Macros?
|
||||
update.type = get_sanitized_token_data_buffer("type", 1, buf, len+1);
|
||||
update.platform = get_sanitized_token_data_buffer("platform", 1, buf, len+1);
|
||||
update.platform_arch = get_sanitized_token_data_buffer("platform_arch", 1, buf, len+1);
|
||||
update.platform_min = get_sanitized_token_data_buffer("platform_min", 1, buf, len+1);
|
||||
for (i=0; i<ARRAYSIZE(update.download_url); i++) {
|
||||
update.download_url[i] = get_sanitized_token_data_buffer("download_url", (unsigned int)i+1, buf, len+1);
|
||||
}
|
||||
update.release_notes = get_sanitized_token_data_buffer("release_notes", 1, buf, len+1);
|
||||
|
||||
uprintf("UPDATE DATA:\n");
|
||||
uprintf(" version: %d.%d.%d.%d\n", update.version[0], update.version[1], update.version[2], update.version[3]);
|
||||
uprintf(" platform: %s\r\n platform_arch: %s\r\n platform_min: %s\n", update.platform, update.platform_arch, update.platform_min);
|
||||
for (i=0; i<ARRAYSIZE(update.download_url); i++) {
|
||||
uprintf(" url%d: %s\n", i+1, update.download_url[i]);
|
||||
}
|
||||
uprintf("RELEASE NOTES:\r\n%s\n", update.release_notes);
|
||||
|
||||
// TODO: free all these strings!
|
||||
}
|
||||
|
||||
// Insert entry 'data' under section 'section' of a config file
|
||||
// Section must include the relevant delimitors (eg '[', ']') if needed
|
||||
char* insert_section_data(const char* filename, const char* section, const char* data, BOOL dos2unix)
|
||||
|
@ -245,7 +382,7 @@ out:
|
|||
}
|
||||
|
||||
// Search for a specific 'src' substring in the data for all occurrences of 'token', and replace
|
||||
// if with 'rep'. File can be ANSI or UNICODE and is overwritten. Parameters are UTF-8.
|
||||
// it with 'rep'. File can be ANSI or UNICODE and is overwritten. Parameters are UTF-8.
|
||||
// The parsed line is of the form: [ ]token[ ]data
|
||||
// Returns a pointer to rep if replacement occured, NULL otherwise
|
||||
char* replace_in_token_data(const char* filename, const char* token, const char* src, const char* rep, BOOL dos2unix)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue