FWIW, here is a strtok-based version:
Code:
/**
 * Split 'split_it' on the delimiter characters in 'delim', storing a
 * newly allocated copy of each token in 'string_ar' and terminating
 * the array with a NULL pointer (like argv).
 *
 * NOTE: 'string_ar' must have room for AR_LENGTH + 1 entries — up to
 * AR_LENGTH tokens plus the terminating NULL.
 *
 * Returns 'string_ar' on success, or NULL on allocation failure; on
 * failure all memory allocated by this call has been freed, so
 * nothing leaks and the caller must not read 'string_ar'.
 *
 * 'split_it' is never modified: strtok() mutates its argument, so we
 * tokenize a private copy.  strtok() also keeps static state and is
 * not thread-safe — prefer strtok_r() where that matters.
 */
char ** tokenize ( const char * split_it, const char * delim,
char ** string_ar, const unsigned long AR_LENGTH )
{
    char * split_me = duplicate( split_it ); /* private, mutable copy */
    char * found = NULL;
    unsigned long wordcount = 0;

    if( split_me == NULL )
        return NULL; /* could not copy the input string */

    for( found = strtok( split_me, delim );
         found != NULL && wordcount < AR_LENGTH;
         found = strtok( NULL /* continue in same string */, delim ) )
    {
        string_ar[wordcount] = duplicate( found );
        if( string_ar[wordcount] == NULL ) {
            /* Roll back: free every token copied so far plus the
             * working copy, so a failure return leaks nothing. */
            while( wordcount > 0 )
                free( string_ar[--wordcount] );
            free( split_me );
            return NULL;
        }
        ++wordcount;
    }
    string_ar[wordcount] = NULL; /* argv-style terminator */

    /* free( found ) is not necessary: 'found' merely points into
     * 'split_me', and the tokens themselves were copied into
     * 'string_ar', so freeing 'split_me' releases everything. */
    free( split_me );
    return string_ar;
}