Personally, I don't think either of them will be very useful - but it's kind of fun to see if it can be made to work - and anyway, I was running out of things to add to Catalina while waiting for the P16X32B, so why not?
Ross.
I wouldn't say that. I've found the GCC COG mode to be useful. Eric even wrote a Pong game including the video driver entirely within a single COG.
FYI, this is an i2c driver written using COG mode in PropGCC.
/* i2c_driver.c - i2c single master driver
Copyright (c) 2012 David Michael Betz
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __PROPELLER2__
#include <propeller.h>
#include "i2c_driver.h"
/* minimum overhead per half cycle */
#define MINIMUM_OVERHEAD 32
#ifdef PARALLAX_I2C_BUS
/* set sda high by allowing it to float high, set low by forcing it low */
/* actively drive scl */
#define i2c_set_scl_high() (OUTA |= scl_mask)
#define i2c_set_scl_low() (OUTA &= ~scl_mask)
#define i2c_set_sda_high() (DIRA &= ~sda_mask)
#define i2c_set_sda_low() (DIRA |= sda_mask)
#else
/* set high by allowing the pin to float high, set low by forcing it low */
#define i2c_set_scl_high() (DIRA &= ~scl_mask)
#define i2c_set_scl_low() (DIRA |= scl_mask)
#define i2c_set_sda_high() (DIRA &= ~sda_mask)
#define i2c_set_sda_low() (DIRA |= sda_mask)
#endif
/* i2c state information */
static _COGMEM uint32_t scl_mask;
static _COGMEM uint32_t sda_mask;
static _COGMEM uint32_t half_cycle;
static _COGMEM volatile I2C_MAILBOX *mailbox;
static _NATIVE void i2cStart(void);
static _NATIVE void i2cStop(void);
static _NATIVE int i2cSendByte( uint8_t byte);
static _NATIVE uint8_t i2cReceiveByte(int acknowledge);
_NAKED int main(void)
{
    I2C_INIT *init = (I2C_INIT *)PAR;
    I2C_CMD cmd;
    uint8_t *p;
    uint32_t count;

    /* get the COG initialization parameters */
    scl_mask = 1 << init->scl;
    sda_mask = 1 << init->sda;
    half_cycle = init->ticks_per_cycle >> 1;
    mailbox = init->mailbox;

    /* make sure the delta doesn't get too small */
    if (half_cycle > MINIMUM_OVERHEAD)
        half_cycle -= MINIMUM_OVERHEAD;

    /* tell the caller that we're done with initialization */
    mailbox->cmd = I2C_CMD_IDLE;

#ifdef PARALLAX_I2C_BUS
    /* initialize the i2c pins */
    OUTA |= scl_mask;
    OUTA &= ~sda_mask;
    DIRA |= scl_mask;
    DIRA &= ~sda_mask;
#else
    /* initialize the i2c pins */
    DIRA &= ~scl_mask;
    DIRA &= ~sda_mask;
    OUTA &= ~scl_mask;
    OUTA &= ~sda_mask;
#endif

    /* handle requests */
    for (;;) {
        uint32_t sts;

        /* wait for the next request */
        while ((cmd = mailbox->cmd) == I2C_CMD_IDLE)
            ;

        /* dispatch on the command code */
        switch (cmd) {
        case I2C_CMD_SEND:
        case I2C_CMD_SEND_MORE:
            p = mailbox->buffer;
            count = mailbox->count;
            sts = I2C_OK;
            if (cmd == I2C_CMD_SEND) {
                i2cStart();
                if (i2cSendByte(mailbox->hdr) != 0) {
                    sts = I2C_ERR_SEND_HDR;
                    break;
                }
            }
            while (count > 0) {
                if (i2cSendByte(*p++) != 0) {
                    sts = I2C_ERR_SEND;
                    break;
                }
                --count;
            }
            if (mailbox->stop)
                i2cStop();
            break;
        case I2C_CMD_RECEIVE:
        case I2C_CMD_RECEIVE_MORE:
            p = mailbox->buffer;
            count = mailbox->count;
            sts = I2C_OK;
            if (cmd == I2C_CMD_RECEIVE) {
                i2cStart();
                if (i2cSendByte(mailbox->hdr) != 0) {
                    sts = I2C_ERR_RECEIVE_HDR;
                    break;
                }
            }
            while (count > 0) {
                int byte = i2cReceiveByte(count != 1);
                if (byte < 0) {
                    sts = I2C_ERR_RECEIVE;
                    break;
                }
                *p++ = byte;
                --count;
            }
            if (mailbox->stop)
                i2cStop();
            break;
        default:
            sts = I2C_ERR_UNKNOWN_CMD;
            break;
        }
        mailbox->sts = sts;
        mailbox->cmd = I2C_CMD_IDLE;
    }

    return 0;
}

static _NATIVE void i2cStart(void)
{
    i2c_set_scl_high();
    i2c_set_sda_high();
    waitcnt(CNT + half_cycle);
    i2c_set_sda_low();
    waitcnt(CNT + half_cycle);
    i2c_set_scl_low();
}

static _NATIVE void i2cStop(void)
{
    /* scl and sda should be low on entry */
    waitcnt(CNT + half_cycle);
    i2c_set_scl_high();
    i2c_set_sda_high();
}

static _NATIVE int i2cSendByte(uint8_t byte)
{
    int count, result;

    /* send the byte, high bit first */
    for (count = 8; --count >= 0; ) {
        if (byte & 0x80)
            i2c_set_sda_high();
        else
            i2c_set_sda_low();
        waitcnt(CNT + half_cycle);
        i2c_set_scl_high();
        waitcnt(CNT + half_cycle);
        i2c_set_scl_low();
        byte <<= 1;
    }

    /* receive the acknowledgement from the slave */
    i2c_set_sda_high();
    waitcnt(CNT + half_cycle);
    i2c_set_scl_high();
    result = (INA & sda_mask) != 0;
    waitcnt(CNT + half_cycle);
    i2c_set_scl_low();
    i2c_set_sda_low();
    return result;
}

static _NATIVE uint8_t i2cReceiveByte(int acknowledge)
{
    uint8_t byte = 0;
    int count;

    i2c_set_sda_high();
    for (count = 8; --count >= 0; ) {
        byte <<= 1;
        waitcnt(CNT + half_cycle);
        i2c_set_scl_high();
        byte |= (INA & sda_mask) ? 1 : 0;
        waitcnt(CNT + half_cycle);
        i2c_set_scl_low();
    }

    /* acknowledge */
    if (acknowledge)
        i2c_set_sda_low();
    else
        i2c_set_sda_high();
    waitcnt(CNT + half_cycle);
    i2c_set_scl_high();
    waitcnt(CNT + half_cycle);
    i2c_set_scl_low();
    i2c_set_sda_low();
    return byte;
}
#endif // __PROPELLER2__
FYI, this is an i2c driver written using COG mode in PropGCC.
I agree that being able to use C control structures is nice, but other than that, your driver illustrates why I don't believe COG mode will be all that useful. Almost every line of your C code corresponds to one or two fairly simple PASM instructions - it could in fact be written directly in PASM and look very much the same - except that in PASM you wouldn't normally write it that way, because it's more efficient not to. But even if you structured it the same way specifically for comparison or educational purposes, the PASM version would probably be both faster and smaller. How big is this C version when compiled?
Also (correct me if I'm wrong) but I presume that the various non-C keywords like _NATIVE and _NAKED are to tell the C compiler to leave out the usual subroutine linkage stuff (presumably because they would just increase the size of the executable for no benefit). If that's so, and it turns out you are relying on these adornments to make it work, then you are not really using C at all - you are defining a new C dialect. In the case of your I2C driver, I'd be fairly sure it would work even if you left those keywords out - but for a more complex driver you would probably have to use them. Or have I misunderstood what they are for?
As usual, my approach will be slightly different - I will just use unadorned C code, and rely on the optimizer to strip out any unnecessary stuff, and also inline functions where possible. If it can't be made to work that way, then I'd rather resort to PASM than introduce a new "dialect" of C specifically for writing drivers.
I realize you will probably disagree!
Ross.
I agree that being able to use C control structures is nice, but other than that, your driver illustrates why I don't believe COG mode will be all that useful. Almost every line of your C code corresponds to one or two fairly simple PASM instructions - it could in fact be written directly in PASM and look very much the same - except that in PASM you wouldn't normally write it that way, because it's more efficient not to. But even if you structured it the same way specifically for comparison or educational purposes, the PASM version would probably be both faster and smaller. How big is this C version when compiled?
This can be said of any code written in C. You can write it more efficiently in assembler if you know how to do that. However, the result is often less easy to read and less maintainable. If you want ultimate efficiency you shouldn't be using C at all.
Also (correct me if I'm wrong) but I presume that the various non-C keywords like _NATIVE and _NAKED are to tell the C compiler to leave out the usual subroutine linkage stuff (presumably because they would just increase the size of the executable for no benefit). If that's so, and it turns out you are relying on these adornments to make it work, then you are not really using C at all - you are defining a new C dialect. In the case of your I2C driver, I'd be fairly sure it would work even if you left those keywords out - but for a more complex driver you would probably have to use them. Or have I misunderstood what they are for?
Yes, they control the linkages used for subroutine calls. You're right that they aren't standard C. So what? I suspect that every interrupt service routine you find in the Linux kernel will have some sort of decorations like that. Are you saying the Linux kernel isn't written in C? I don't understand your aversion to C extensions to help create efficient code in time and space critical sections. It is common practice almost everywhere except where Catalina is in use.
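As an aside, decorations like these are usually nothing more than thin macros over a compiler-specific attribute syntax. The definitions below are only a sketch of that pattern, not the actual PropGCC headers, which may spell the attributes differently:

/* sketch only - illustrative, not the real PropGCC definitions */
#ifdef __GNUC__
#define _NATIVE  __attribute__((native))    /* use the lightweight "native" call sequence */
#define _NAKED   __attribute__((naked))     /* omit the usual prologue/epilogue entirely */
#define _COGMEM  __attribute__((cogmem))    /* place the variable in cog RAM */
#endif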
As usual, my approach will be slightly different - I will just use unadorned C code, and rely on the optimizer to strip out any unnecessary stuff, and also inline functions where possible. If it can't be made to work that way, then I'd rather resort to PASM than introduce a new "dialect" of C specifically for writing drivers.
That's your choice and there is nothing wrong with it. I don't see any big advantage to it though. Certainly, the PASM is not portable so what's the difference if C code written to do the same thing is not portable?
@David, Correct. The Linux kernel is not written in C. Might look like C but it relies on a ton of extensions provided by GCC. A few of which are described here: http://www.ibm.com/developerworks/library/l-gcc-hacks/index.html
It's only recently that the Clang guys managed to compile a kernel with Clang, as they had to do a lot of work to add those extensions to their C compiler.
Okay, it's written in an extended version of C but it certainly isn't an entirely new language. Most of it is undecorated C. Any C programmer would find it easy to read. Well, maybe not easy but that's not because of the extensions. It's because the code is fairly opaque in places. I think the point is that hobbling the language by leaving out anything other than "standard C" doesn't guarantee that code written will be portable or even understandable. That's up to the programmer in any language.
The best way to manage this is to separate the generic code from the hardware-specific code and define a clean interface between the two. Then write the generic code in standard C and who cares how the hardware-specific code is written. It could be in decorated C or assembly or Pascal or whatever as long as the functions that it defines can be called from the generic C code.
It could be that many Propeller programs consist almost entirely of Propeller-specific code. In that case there is no way to make the code generic or portable no matter what language you write it in. In that case, PASM, Spin, decorated C, they all do the job.
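To make that separation concrete (all names here are invented for the example): the portable part sees only a small header, and how the other side of that header is implemented is nobody's business.

/* board.h - hypothetical hardware interface */
#include <stdint.h>
void    board_led_set(int on);            /* could be PASM, decorated C, Spin... */
uint8_t board_eeprom_read(uint16_t addr);

/* app.c - plain ANSI C, portable to anything that provides board.h */
#include "board.h"
void blink_if_magic(void)
{
    if (board_eeprom_read(0) == 0xA5)     /* the decision logic stays generic */
        board_led_set(1);
}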
You can certainly use a CUSTOM platform with the debugger - the word "target" in the blackcat document should really be "target package" - there are three such packages provided with Catalina - target (the default), basic (a smaller one intended for embedded use which has no HMI drivers) and minimal (an absolutely minimal one) - or you can create your own.
Only the default and basic target packages have debug capabilities - I removed them from the minimal target to reduce its complexity. So if you were to create your own target package, it may or may not have debug capabilities.
But there is no reason your CUSTOM platform should not work with the debugger (I just tried it here, and it does). It must be that something in there is not correctly configured for your board (most likely in the Custom_DEF.inc file). If you post a copy of it I will try and see what it might be, but if the DEMO platform works, another alternative is to do what you suggest and simply copy all the DEMO files to overwrite the CUSTOM files, and then edit those to suit your platform instead.
Now I'm lost. What are default, basic, minimal? Target packages?
In Catalina there is too much under the hood. That was the main reason why I have always avoided Catalina. The debugger is the reason for trying to dive in.
I know now why the debugger did not work. You have to activate the 'No HMI plugin, disable mouse....' build option, then the Custom platform works.
What else is activated that I do not need or want?
What I miss in the manual is a table with (all) the possibilities (with code sizes).
But maybe it is my bad English, and that's why I have problems following the manual.
I disagree!
1) Minimizing code size is not always a prime requirement. If it fits in there, it's good enough.
2) Maximizing execution speed is not always a prime requirement. If it's fast enough we're good to go.
3) Not everybody wants to have to pick up the PASM manual and get to grips with that.
4) The C code can often be developed on a PC and tested with a test harness (see the sketch after this list).
5) The C code will be more maintainable. Even others who know almost nothing about a Propeller or PASM will be able to tweak it.
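On point 4, one way to do that (a rough sketch only - the register names are the Propeller's, everything else is invented): stub out the hardware registers and timing calls so the protocol logic compiles and runs on the desktop.

/* pc_stubs.h - hypothetical shims for building a bit-banged driver on a PC */
#ifdef PC_TEST
#include <stdint.h>
static uint32_t INA, OUTA, DIRA;       /* fake pin registers for the test build */
#define CNT 0
#define waitcnt(x) ((void)(x))         /* no real-time delays needed on a PC */
#endif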
Especially point 3 and 5 !
C can be a very good macro assembler. But this depends on the compiler. gcc is not a good example. I'd like to see how you can handle this with lcc.
Dialect:
Every microcontroller compiler has its own special keywords (pragmas, and so on). That's why there are defines and preprocessor switches to make the code portable.
And this is my point 6 -> Portability.
I hope you make your 'cog-mode'!
With debugger?
C can be a very good macro assembler. But this depends on the compiler. gcc is not a good example. I'd like to see how you can handle this with lcc.
Why is GCC not a good example? It does a pretty decent job of optimizing I think. I'm sure there are better but is LCC really better at optimization? I had thought LCC was pretty much abandoned a long time ago. Is it still being actively developed? If so, is Catalina on the latest release?
Why is GCC not a good example? It does a pretty decent job of optimizing I think. I'm sure there are better but is LCC really better at optimization? I had thought LCC was pretty much abandoned a long time ago. Is it still being actively developed? If so, is Catalina on the latest release?
My problem with gcc is that without optimisation you have things like mov r1,r1. With optimisation the code is scrambled (too much optimisation). There is nothing in between. But maybe this will be the case with lcc too. I think only a compiler written especially for the controller can do this right.
My problem with gcc is that without optimisation you have things like mov r1,r1. With optimisation the code is scrambled (too much optimisation). There is nothing in between. But maybe this will be the case with lcc too. I think only a compiler written especially for the controller can do this right.
Yes, it is true that GCC without optimization is pretty much useless except on a machine with huge resources. You need some level of optimization. And you're also right that the code tends to get scrambled a lot although the resulting code usually runs much faster and is smaller.
We don't disagree! I didn't say that gcc doesn't make fast code. But it is not the code you have written.
But we shouldn't discuss this here. I know it's my fault. I should not have written my comment.
We don't disagree! I didn't say that gcc doesn't make fast code. But it is not the code you have written.
But we shouldn't discuss this here. I know it's my fault. I should not have written my comment.
I guess if you don't want the optimization for some module you can turn it off. Also, there are various levels. I always use -Os but maybe -O1 does less code shuffling. If you want to stick with high levels of optimization you'll need to decorate your C code with indications of what code needs to be kept in order. It's ugly but it might be better than doing without all optimization all the time just to get predictable code in the few places where you really need it.
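For the record, GCC can also do that per function rather than per module (GCC-specific, of course):

/* the rest of the file still builds with -Os; only this routine is left alone */
void __attribute__((optimize("O0"))) keep_in_order(volatile int *flag)
{
    *flag = 1;    /* neither removed nor reordered */
    *flag = 0;
}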
Now I'm lost. What are default, basic, minimal? Target packages?
In Catalina there is too much under the hood. That was the main reason why I have always avoided Catalina. The debugger is the reason for trying to dive in.
If you have been following the other recent discussion in this thread, you may begin to see why Catalina has so much "under the hood". The alternative is to come up with a language that looks like C, but for which you have to know various tricks and traps before you can program in it successfully - this is the approach that GCC has always taken (not just PropGCC, it is true of most flavors of GCC) - they see no reason not to extend the C language in various ways, to the point where C code written for the GCC compiler is now difficult to port to other C compilers.
With Catalina, if you have one of the fully supported platforms (such as the C3), you don't need to know anything about what is under the hood - if you have any book on ANSI standard C, any of the examples in the book should compile and run with no modification. And this remains true even when you start to get more adventurous, or realize you need to change to another memory model, such as the CMM or XMM memory models (because you have run out of space) or now the COG memory model (because you need it to run as fast as possible).
Of course, if you need to add a platform, then you need to know just a little about what is under the hood. If you want to create your own plugins, you will need to know more. And if you want to create your own target package, you will need to know quite a lot more.
Now, to get back to "target packages". Each one is just a directory under the main Catalina directory, and it contains essentially all the necessary C run-time infrastructure - various plugins, platform definitions, memory access APIs, program loaders, etc. (basically, anything required to get your program actually loaded and running except for the C compiler itself, or the C libraries). Each target package is self-contained. If you never use the "minimal" or "basic" packages, you can delete them. Maybe I should even make these other target packages optional during installation of Catalina.
These target packages are described starting on page 91 of the Catalina Reference Manual - here are some excerpts:
- The default Target Package (in sub-directory target) can be used with any of the supported Propeller base platforms (i.e. HYDRA, HYBRID, C3 etc). It also includes the default CUSTOM platform, which is suitable for nearly any Propeller equipped with a 5 MHz crystal.
- The basic Catalina Target Package (in sub-directory basic) provides support for only one single Propeller platform.
- The minimal Catalina Target Package (in sub-directory minimal) is an intentionally minimalist target package. It provides support for only one single Propeller platform.
Unless you have specific needs (such as developing your own plugins, or developing a deeply embedded application) you will probably only ever use the target package in the sub-directory target (this is the default unless you use the -T command line option to specify something else).
I know now why the debugger did not work. You have to activate the 'No HMI plugin, disable mouse....' build option, then the Custom platform works.
What else is activated that I do not need or want?
Aha! What is happening is that your platform by default is including a serial HMI driver, which is using the same pins the debugger wants to use for its serial comms. You have found the correct answer, which is to either specify NO_HMI, or choose another type of HMI (e.g. VGA or TV). If you need to use a serial HMI, you can still use the debugger, but you need to tell the program to use a different set of pins for the debugger serial comms, and you have to have two USB connections to your platform (I have to do this on the HYDRA or HYBRID, both of which cannot use the standard serial port when XMM memory is in use - instead, I use the mouse port or a keyboard port, with a special cable described on page 23 of the Catalina Reference Manual).
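For example, something along these lines on the command line (a guess at the exact spelling - the Reference Manual has the real option names for each release):

catalina myprog.c -lc -C CUSTOM -C NO_HMI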
What I miss in the manual is a table with (all) the possibilities (with code sizes).
But maybe it is my bad English, and that's why I have problems following the manual.
There is a discussion on code size in the Catalina Reference Manual starting on page 118 ("A Note about Catalina Code Sizes"). However, it is mainly concerned with reducing code size, and it really only covers LMM and CMM modes - it does not cover all the various XMM modes.
A table of all possible combinations of even a simple example program compiled with all possible memory models, loader options, XMM configurations, HMI options, and optimizer levels would be pages long (on the C3, for example, there are 4 different ways just to use the same XMM memory) - but it is a good idea, so I will see if I can come up with a sensible subset to include in the next release.
My problem with gcc is that without optimisation you have things like mov r1,r1. With optimisation the code is scrambled (too much optimisation). There is nothing in between. But maybe this will be the case with lcc too. I think only a compiler written especially for the controller can do this right.
Actually if you look at raw (i.e. un-optimized) LCC output compared to un-optimized GCC output, LCC output is much cleaner, simpler and more efficient. I was amazed to find this - the LCC compiler is actually quite good at basic code generation - which helps explain why it is still used as the basis for many compilers - both commercial and free ones - even after all this time.
But although GCC is pretty ordinary at basic code generation, it makes up for it by having an astonishingly good optimizer, whereas I have had to write my own for LCC. I would be the first to admit mine is nowhere near as good as the GCC one - but it is getting better all the time!
I didn't say that gcc doesn't make fast code. But it is not the code you have written.
That's an interesting statement. Surely the whole point of a high level language is that one did not want to write that code? One wanted to express an intent at a higher level of abstraction and have a compiler figure out how to do it in the best way possible.
This complaint usually comes from people who write something like
int main(int argc, char *argv[]) {
    int a = 2;
    int b = 3;
    int c;
    c = a + b;
    return(0);
}
"Whaa, where is the code? This compiles to two instructions, make a zero and return. That's not what I wrote! Where is my addition code?"
Thing is, that is exactly what you wrote. That code says "Make me a function that returns an integer of zero and has no effect on anything else". The compiler dutifully makes such a function.
Admittedly this kind of optimization, removal and rearrangement of code, might make life tricky if you expect to step through it with a debugger as there is now no one to one correspondence between lines of source and the executable instructions. Can't be helped really, except by turning optimizations off, but then your code won't fit...
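For what it's worth, the way to see the addition survive in the output is simply to make the result observable - standard C, nothing compiler-specific:

#include <stdio.h>

int main(void)
{
    volatile int a = 2;    /* volatile: the compiler may not assume these values */
    volatile int b = 3;
    int c = a + b;         /* so the loads and the add must really happen */
    printf("%d\n", c);     /* and the sum is now observable output */
    return 0;
}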
Don't forget GCC has options like "-std=c99" which will disallow all those GCC extensions and strictly check your code is compliant with the standard specified.
Don't forget GCC has options like "-std=c99" which will disallow all those GCC extensions and strictly check your code is compliant with the standard specified.
Yes, and I wonder how much GCC code would run correctly if you ever used it!
I could well imagine that the source code to GCC itself makes use of those extensions and hence requires that GCC be built with GCC. I suspect, though, that this is not true. It turns out it has been possible to build GCC with Clang since 2009, and from a description of the process, only C99 support was required.
What about other code? Well, for example there are nearly 40,000 packages in Debian. Many of those are written in C/C++, and most are designed to be portable; my impression is that not much code requires those GCC extensions.
Extensions are odd things. They break the "standards". Most people avoid them as a consequence. On the other hand they are experiments in the directions that a language might evolve. Those extensions may well end up in future standards.
I could well imagine that the source code to GCC itself makes use of those extensions and hence requires that GCC be built with GCC. I suspect, though, that this is not true. It turns out it has been possible to build GCC with Clang since 2009, and from a description of the process, only C99 support was required.
What about other code? Well, for example there are nearly 40,000 packages in Debian. Many of those are written in C/C++, and most are designed to be portable; my impression is that not much code requires those GCC extensions.
Extensions are odd things. They break the "standards". Most people avoid them as a consequence. On the other hand they are experiments in the directions that a language might evolve. Those extensions may well end up in future standards.
Actually, I suspect you may find that to compile GCC, Clang had to adopt many of GCC's own extensions - see here.
This document describes the language extensions provided by Clang. In addition to the language extensions listed here, Clang aims to support a broad range of GCC extensions. Please see the GCC manual for more information on these extensions.
Of course there's a lot of code written for GCC that can be compiled on other C compilers. But once you get into deeply embedded stuff, the code often becomes not just platform dependent, but compiler dependent because of the use of extensions. The Linux kernel is a good example, and the Arduino low level C code would probably be another. Some of this code may compile with another compiler, because it is fairly common practice to simply redefine the extension keywords to be null. The result may compile, but it probably won't run correctly - if it did, why would you have used the extension keywords in the first place?
And yes, I understand that sometimes extensions are necessary, and they can help improve the language. But sometimes they are not - sometimes they are simply "conveniences", and in other cases they are actually designed to "lock you in" to a specific vendor (not mentioning any names of course **cough**Micro$oft**cough**). Most of these extensions will never make their way into any standard, because each compiler will have different ones. If there is a generally recognized language deficiency, it will often be corrected in the next iteration of the language - but by redesigning the language holistically, not by using the extensions adopted by one specific compiler, (which are often really ugly!).
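The trick mentioned above usually looks something like this (purely illustrative):

/* when porting "decorated" code to a compiler without the extensions */
#ifndef __GNUC__
#define __attribute__(x)    /* silently discard GCC attribute decorations */
#define _NAKED              /* and any wrapper macros built on top of them */
#define _NATIVE
#endif
/* it will now compile - whether it still behaves correctly is another matter */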
If you have been following the other recent discussion in this thread, you may begin to see why Catalina has so much "under the hood". The alternative is to come up with a language that looks like C, but for which you have to know various tricks and traps before you can program in it successfully - this is the approach that GCC has always taken (not just PropGCC, it is true of most flavors of GCC) - they see no reason not to extend the C language in various ways, to the point where C code written for the GCC compiler is now difficult to port to other C compilers.
This is a very good point. It is bad to use extensions because code that compiles with one compiler will not compile with another if it uses the extensions. In fact, this has been a problem from the beginning. We should all go back to the original K&R C and stop messing around with all of the extensions that have been made over the years. Abandon the newer standards because they are nothing but standardized versions of features that were non-standard extensions to start with. Let's go back to what the original inventors of the language intended.
int fact(n)
int n;
{
    if (n > 2)
        return (n * fact(n - 1));
    else
        return (n);
}
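For contrast, the same function with an ANSI-style prototype - the form everyone writes today, which itself only arrived with the C89 standard:

int fact(int n)
{
    if (n > 2)
        return (n * fact(n - 1));
    else
        return (n);
}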
Of course there's a lot of code written for GCC that can be compiled on other C compilers. But once you get into deeply embedded stuff, the code often becomes not just platform dependent, but compiler dependent because of the use of extensions.
This is probably true of Catalina as well to some extent. What names did you choose for functions that interface with the hardware? Are they the same and with identical semantics to the ones chosen by PropGCC? Are the registers named in upper or lowercase? Do they have a prepended underscore? Lots of decisions get made when mapping a language onto an architecture and not all of them have to do with language extensions. Also, I believe that "#pragma" is a standard C language feature. Why should one avoid it if it's in the standard? The standard intentionally avoids specifying what can appear in a #pragma statement and leaves it to the individual compiler. This is a recognition by the standards committee that any practical implementation of C will need some target-specific features. Most (maybe all) of the PropGCC extensions for the Propeller are just pragmas wrapped in macros and hence follow the ANSI standard. Anyway, this discussion has been beaten to death by now. No one is going to change anyone else's opinion on this I'm afraid.
Edit: Ugh. While what I said about #pragma is true, it isn't how some of the PropGCC decorations are implemented. They use the __attribute__ syntax which is GCC specific. Sorry! In any case, I think the existence of #pragma to allow target- or compiler-specific extensions suggests that the ANSI people recognized the need for such things. The GCC __attribute__ syntax just carries that one step further. It wouldn't surprise me if something like that ended up in a future version of the ANSI standard.
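As a small side-by-side illustration of the two mechanisms - both ask for the same packed struct layout, one via the #pragma escape hatch (whose contents are implementation-defined by design) and one via the GCC-specific attribute:

#include <stdint.h>

#pragma pack(push, 1)                 /* pragma form, recognised by several compilers */
struct wire_hdr_a { uint8_t cmd; uint16_t len; };
#pragma pack(pop)

struct wire_hdr_b {                   /* attribute form of the same request */
    uint8_t  cmd;
    uint16_t len;
} __attribute__((packed));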
True - but it's fun arguing!
Ross.
Since you like to argue, I'll ask another question. :-)
Why do you consider writing in PASM to be an acceptable way to introduce Propeller-specific code but you don't consider decorated C code to be acceptable? In both cases you end up with code that is not portable but the decorated C code may be more readable by someone who knows C but not PASM. It seems to me that if you really want all code to be portable you would also deprecate any PASM code and only promote use of stdin/stdout/stderr and the standard C runtime library code for all programs.
Since you like to argue, I'll ask another question. :-)
Why do you consider writing in PASM to be an acceptable way to introduce Propeller-specific code but you don't consider decorated C code to be acceptable? In both cases you end up with code that is not portable but the decorated C code may be more readable by someone who knows C but not PASM. It seems to me that if you really want all code to be portable you would also deprecate any PASM code and only promote use of stdin/stdout/stderr and the standard C runtime library code for all programs.
Simple - I don't think "adorned" or "decorated" C can ever replace PASM. For instance, I simply cannot imagine it is ever going to be possible to write a High Resolution VGA driver plus associated Vector Graphics support functions in anything but PASM. Even using intensively hand-crafted and optimized PASM, Catalina's version takes 5 co-operating cogs! Try writing that in C!
So to address the entire spectrum of Propeller development - i.e. between high-level algorithms (for which "plain old" unadorned C is the most appropriate language) and low level stuff (for which "plain old" PASM is the most appropriate language), I simply don't see that it makes sense to introduce a third language option (i.e. "adorned" or "decorated" C). There is no "gap" between "plain old" C and PASM that needs addressing, and this third option can never completely replace either the one or the other, so why introduce it?
Simple - I don't think "adorned" or "decorated" C can ever replace PASM. For instance, I simply cannot imagine it is ever going to be possible to write a High Resolution VGA driver plus associated Vector Graphics support functions in anything but PASM. Even using intensively hand-crafted and optimized PASM, Catalina's version takes 5 co-operating cogs! Try writing that in C!
So to address the entire spectrum of Propeller development - i.e. between high-level algorithms (for which "plain old" unadorned C is the most appropriate language) and low level stuff (for which "plain old" PASM is the most appropriate language), I simply don't see that it makes sense to introduce a third language option (i.e. "adorned" or "decorated" C). There is no "gap" between "plain old" C and PASM that needs addressing, and this third option can never completely replace either the one or the other, so why introduce it?
Ross.
Well, I guess that's a matter of opinion. In Linux there is very little assembly code and much of the hardware interface code is written in C with a few "decorations" like "volatile". In fact, volatile is standard ANSI C so I guess it isn't really a decoration at all. In many cases, that is all that is necessary. The other decorations that my i2c driver required are because of the extremely limited amount of directly addressable memory in COG mode. In fact those aren't even required if you are willing to give the COG mode program a small amount of hub memory to use as a stack. I wasn't willing to do that so I put in the decorations. For complex drivers PASM is a better choice than COG mode C simply because there is so little space in COG memory. However, I think it's useful to provide COG mode as an alternative for drivers like the i2c example that I posted that will fit in COG memory even when written in C. I guess you agree since you're adding COG mode to Catalina as well.
That's an interesting statement. Surely the whole point of a high level language is that one did not want to write that code? One wanted to express an intent at a higher level of abstraction and have a compiler figure out how to do it in the best way possible.
This complaint usually comes from people who write something like
int main(int argc, char *argv[]) {
    int a = 2;
    int b = 3;
    int c;
    c = a + b;
    return(0);
}
"Whaa, where is the code? This compiles to two instructions, make a zero and return. That's not what I wrote! Where is my addition code?"
Thing is, that is exactly what you wrote. That code says "Make me a function that returns an integer of zero and has no effect on anything else". The compiler dutifully makes such a function.
Admittedly this kind of optimization, removal and rearrangement of code, might make life tricky if you expect to step through it with a debugger as there is now no one to one correspondence between lines of source and the executable instructions. Can't be helped really, except by turning optimizations off, but then your code won't fit...
Heater, you like this, right? Make a stupid example to make others look small and yourself look big.
But yes, in my opinion a compiler should only optimise the use of assembler instructions (translate the code into as few instructions as possible, or into faster instructions, depending on the optimisation settings), but never cut or rearrange the code.
I am the programmer, and if I think the code is necessary this way, then it is. And if I write your example then I am stupid and the compiler can tell me that (a warning), if I tell it to do so.
And for gcc I never found this setting. But maybe I'm just too stupid to read the hundreds of pages of the gcc manual.
But yes, in my opinion a compiler should only optimise the use of assembler instructions (translate the code into as few instructions as possible, or into faster instructions, depending on the optimisation settings), but never cut or rearrange the code.
I am the programmer, and if I think the code is necessary this way, then it is. And if I write your example then I am stupid and the compiler can tell me that (a warning), if I tell it to do so.
And for gcc I never found this setting. But maybe I'm just too stupid to read the hundreds of pages of the gcc manual.
You can, of course, turn off all optimization and then you get exactly what you wrote. However, as many have mentioned, the code is really *too* redundant to be of much use. There might not be an optimization option that gets rid of the redundant code but doesn't move things around. If that's the case then I guess GCC is not for you. Maybe Ross will say whether Catalina does any code reordering. If not, then it would be a good option. Luckily we have both choices on the Propeller.
Heater, you like this, right? Make a stupid example to make others look small and yourself look big.
I'm sorry my post came over that way. Perhaps I phrased things a bit harshly.
It was a serious question. With the intention of eliciting what it is people expect compilers to do. Thank you for providing your views.
This statement "I am the programmer and if I think the code is necessary this way, then it is." is interesting. And I don't necessarily disagree. It is however the opposite of what has been going on in compiler technology ever since people started building compilers.
Of course normally a compiler has options to force what you are talking about. "-O0" will do it, in the case of GCC.
Which brings us to:
And for gcc I never found this setting. But maybe I'm just too stupid to read the hundreds of pages of the gcc manual.
It is certainly true that GCC has a billion options and no normal human would know or use all of them. I guess they are all there for good reason.
GCC is a beast. It supports lots of different languages as input and generates code for lots of different architectures as output. Quite amazing really. Much of what is in the middle is there to enable optimization to be done. Work which is common to all languages and architectures. Which, it seems, you don't want.
A question? How would you feel if Chip put an optimization into Spin that did a similar "dead code" removal job? How would you know? Why would it matter?
Actually, Spin already has such an optimization. If you include the same object many times only one copy of the code is placed into your binary.
I think the larger idea is that the compiler should generate the best possible code to compute the result you describe in your source code. If it can prove that the order of operations doesn't affect the outcome in some particular case, it can move the code around to make best use of registers, etc. This works pretty much all the time in application code. Where it fails is in driver code where the order of operations might matter. There are ways around this in C using, as Ross would point out, non-standard extensions. There is also the option of turning off optimization for those functions or even just writing those functions in assembly language. Since all of those options exist, I don't see why you would want to tie the hands of the optimizer and probably end up with slower and/or larger code in cases where the exact ordering of operations doesn't matter.
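A minimal sketch of that last point, with an invented register address: accesses through a volatile lvalue are ordered with respect to each other in plain ANSI C, and for simple drivers that is often all the guarantee that is needed.

#include <stdint.h>

/* hypothetical memory-mapped output register - address made up for illustration */
#define OUT_REG (*(volatile uint32_t *)0x1F800000u)

static void pulse(uint32_t mask)
{
    OUT_REG |=  mask;    /* the optimizer must keep this access... */
    OUT_REG &= ~mask;    /* ...and this one, in this order */
}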