// packet logs and thus if an update is lost it is never repeated; this makes
// csqc entities useless at the moment.
-void EntityFrameCSQC_WriteState (sizebuf_t *msg, int number, qboolean doupdate, qboolean *sectionstarted)
+void EntityFrameCSQC_WriteState (sizebuf_t *msg, int maxsize, int number, qboolean doupdate, qboolean *sectionstarted)
{
int version;
prvm_eval_t *val, *val2;
version = 0;
if (doupdate)
{
- if (msg->cursize + !*sectionstarted + 2 + 1 + 2 > msg->maxsize)
+ if (msg->cursize + !*sectionstarted + 2 + 1 + 2 > maxsize)
return;
val2 = PRVM_EDICTFIELDVALUE((&prog->edicts[number]), prog->fieldoffsets.Version);
version = (int)val2->_float;
if(!*sectionstarted)
MSG_WriteByte(msg, svc_csqcentities);
MSG_WriteShort(msg, number);
+ msg->allowoverflow = true;
PRVM_G_INT(OFS_PARM0) = sv.writeentitiestoclient_cliententitynumber;
prog->globals.server->self = number;
PRVM_ExecuteProgram(val->function, "Null SendEntity\n");
+ msg->allowoverflow = false;
if(PRVM_G_FLOAT(OFS_RETURN))
{
- if (msg->cursize + 2 > msg->maxsize)
+ if (msg->cursize + 2 > maxsize)
{
// if the packet no longer has enough room to write the
// final index code that ends the message, rollback to the
// state before the writes and return
{
// rollback the buffer to its state before the writes
msg->cursize = oldcursize;
+ msg->overflowed = false;
// if the function returned FALSE, simply write a remove
// this is done by falling through to the remove code below
version = 0;
return;
// if there isn't enough room to write the remove message, just return, as
// it will be handled in a later packet
- if (msg->cursize + !*sectionstarted + 2 + 2 > msg->maxsize)
+ if (msg->cursize + !*sectionstarted + 2 + 2 > maxsize)
return;
// first write the message identifier if needed
if(!*sectionstarted)
}
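For reference, a minimal standalone sketch (illustrative names only, not engine code) of the rollback pattern used above: record the buffer position, temporarily allow overflow while data of unknown size is written, then restore the position and clear the overflow flag if the result would exceed the rate-derived maxsize rather than the physical buffer size.

	#include <string.h>

	typedef struct examplebuf_s
	{
		unsigned char *data;
		int maxsize;        // physical capacity of data[]
		int cursize;        // bytes written so far
		int allowoverflow;  // if false, exceeding maxsize would be a fatal error
		int overflowed;     // set when a write did not fit
	} examplebuf_t;

	// returns 1 if the payload was committed, 0 if it was rolled back
	static int Example_WriteWithRollback(examplebuf_t *buf, int maxsize, const unsigned char *payload, int payloadsize)
	{
		int oldcursize = buf->cursize;
		buf->allowoverflow = 1;
		// a write of unpredictable size (in the engine this is the QC SendEntity call)
		if (buf->cursize + payloadsize <= buf->maxsize)
		{
			memcpy(buf->data + buf->cursize, payload, payloadsize);
			buf->cursize += payloadsize;
		}
		else
			buf->overflowed = 1;
		buf->allowoverflow = 0;
		// enforce the soft, rate-derived limit, not the physical buffer size
		if (buf->overflowed || buf->cursize > maxsize)
		{
			buf->cursize = oldcursize;
			buf->overflowed = 0;
			return 0; // caller retries the entity in a later packet
		}
		return 1;
	}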
//[515]: we use only one array per-client for SendEntity feature
-void EntityFrameCSQC_WriteFrame (sizebuf_t *msg, int numstates, const entity_state_t *states)
+void EntityFrameCSQC_WriteFrame (sizebuf_t *msg, int maxsize, int numstates, const entity_state_t *states)
{
int i, num;
qboolean sectionstarted = false;
return;
// make sure there is enough room to store the svc_csqcentities byte,
// the terminator (0x0000) and at least one entity update
- if (msg->cursize + 32 >= msg->maxsize)
+ if (msg->cursize + 32 >= maxsize)
return;
num = 1;
// all entities between the previous entity state and this one are dead
for (;num < n->number;num++)
if(svs.clients[sv.writeentitiestoclient_clientnumber].csqcentityversion[num])
-	EntityFrameCSQC_WriteState(msg, num, false, &sectionstarted);
+	EntityFrameCSQC_WriteState(msg, maxsize, num, false, &sectionstarted);
// update this entity
-	EntityFrameCSQC_WriteState(msg, num, true, &sectionstarted);
+	EntityFrameCSQC_WriteState(msg, maxsize, num, true, &sectionstarted);
// advance to next entity so the next iteration doesn't immediately remove it
num++;
}
// all remaining entities are dead
for (;num < prog->num_edicts;num++)
if(svs.clients[sv.writeentitiestoclient_clientnumber].csqcentityversion[num])
-	EntityFrameCSQC_WriteState(msg, num, false, &sectionstarted);
+	EntityFrameCSQC_WriteState(msg, maxsize, num, false, &sectionstarted);
if (sectionstarted)
{
// write index 0 to end the update (0 is never used by real entities)
}
-void EntityFrameQuake_WriteFrame(sizebuf_t *msg, int numstates, const entity_state_t *states)
+void EntityFrameQuake_WriteFrame(sizebuf_t *msg, int maxsize, int numstates, const entity_state_t *states)
{
const entity_state_t *s;
entity_state_t baseline;
}
// if the commit is full, we're done this frame
- if (msg->cursize + buf.cursize > msg->maxsize)
+ if (msg->cursize + buf.cursize > maxsize)
{
// next frame we will continue where we left off
break;
}
// (server) writes a frame to network stream
-void EntityFrame_WriteFrame(sizebuf_t *msg, entityframe_database_t *d, int numstates, const entity_state_t *states, int viewentnum)
+void EntityFrame_WriteFrame(sizebuf_t *msg, int maxsize, entityframe_database_t *d, int numstates, const entity_state_t *states, int viewentnum)
{
int i, onum, number;
entity_frame_t *o = &d->deltaframe;
EntityFrame4_ResetDatabase(d);
}
-void EntityFrame4_WriteFrame(sizebuf_t *msg, entityframe4_database_t *d, int numstates, const entity_state_t *states)
+void EntityFrame4_WriteFrame(sizebuf_t *msg, int maxsize, entityframe4_database_t *d, int numstates, const entity_state_t *states)
{
const entity_state_t *e, *s;
entity_state_t inactiveentitystate;
prvm_eval_t *val;
// if there isn't enough space to accomplish anything, skip it
- if (msg->cursize + 24 > msg->maxsize)
+ if (msg->cursize + 24 > maxsize)
return;
// prepare the buffer
}
}
// if the commit is full, we're done this frame
- if (msg->cursize + buf.cursize > msg->maxsize - 4)
+ if (msg->cursize + buf.cursize > maxsize - 4)
{
// next frame we will continue where we left off
break;
d->packetlog[i].packetnumber = 0;
}
-void EntityFrame5_WriteFrame(sizebuf_t *msg, entityframe5_database_t *d, int numstates, const entity_state_t *states, int viewentnum, int movesequence)
+void EntityFrame5_WriteFrame(sizebuf_t *msg, int maxsize, entityframe5_database_t *d, int numstates, const entity_state_t *states, int viewentnum, int movesequence)
{
const entity_state_t *n;
int i, num, l, framenum, packetlognumber, priority;
// write stat updates
if (sv.protocol != PROTOCOL_QUAKE && sv.protocol != PROTOCOL_QUAKEDP && sv.protocol != PROTOCOL_NEHAHRAMOVIE && sv.protocol != PROTOCOL_NEHAHRABJP && sv.protocol != PROTOCOL_NEHAHRABJP2 && sv.protocol != PROTOCOL_NEHAHRABJP3 && sv.protocol != PROTOCOL_DARKPLACES1 && sv.protocol != PROTOCOL_DARKPLACES2 && sv.protocol != PROTOCOL_DARKPLACES3 && sv.protocol != PROTOCOL_DARKPLACES4 && sv.protocol != PROTOCOL_DARKPLACES5)
{
- for (i = 0;i < MAX_CL_STATS && msg->cursize + 6 + 11 <= msg->maxsize;i++)
+ for (i = 0;i < MAX_CL_STATS && msg->cursize + 6 + 11 <= maxsize;i++)
{
if (host_client->statsdeltabits[i>>3] & (1<<(i&7)))
{
buf.cursize = 0;
EntityState5_WriteUpdate(num, n, d->deltabits[num], &buf);
// if the entity won't fit, try the next one
- if (msg->cursize + buf.cursize + 2 > msg->maxsize)
+ if (msg->cursize + buf.cursize + 2 > maxsize)
continue;
// write entity to the packet
SZ_Write(msg, buf.data, buf.cursize);
void Protocol_WriteStatsReliable(void);
// writes a list of quake entities to the network stream
// (or as many will fit)
-void EntityFrameQuake_WriteFrame(sizebuf_t *msg, int numstates, const entity_state_t *states);
+void EntityFrameQuake_WriteFrame(sizebuf_t *msg, int maxsize, int numstates, const entity_state_t *states);
// cleans up dead entities each frame after ReadEntity (which doesn't clear unused entities)
void EntityFrameQuake_ISeeDeadEntities(void);
// reference
void EntityFrame_AddFrame(entityframe_database_t *d, vec3_t eye, int framenum, int numentities, const entity_state_t *entitydata);
// (server) writes a frame to network stream
-void EntityFrame_WriteFrame(sizebuf_t *msg, entityframe_database_t *d, int numstates, const entity_state_t *states, int viewentnum);
+void EntityFrame_WriteFrame(sizebuf_t *msg, int maxsize, entityframe_database_t *d, int numstates, const entity_state_t *states, int viewentnum);
// (client) reads a frame from network stream
void EntityFrame_CL_ReadFrame(void);
// (client) returns the frame number of the most recent frame received
// updates database to account for a frame-received acknowledgment
int EntityFrame4_AckFrame(entityframe4_database_t *d, int framenum, int servermode);
// writes a frame to the network stream
-void EntityFrame4_WriteFrame(sizebuf_t *msg, entityframe4_database_t *d, int numstates, const entity_state_t *states);
+void EntityFrame4_WriteFrame(sizebuf_t *msg, int maxsize, entityframe4_database_t *d, int numstates, const entity_state_t *states);
// reads a frame from the network stream
void EntityFrame4_CL_ReadFrame(void);
void EntityFrame5_CL_ReadFrame(void);
void EntityFrame5_LostFrame(entityframe5_database_t *d, int framenum);
void EntityFrame5_AckFrame(entityframe5_database_t *d, int framenum);
-void EntityFrame5_WriteFrame(sizebuf_t *msg, entityframe5_database_t *d, int numstates, const entity_state_t *states, int viewentnum, int movesequence);
+void EntityFrame5_WriteFrame(sizebuf_t *msg, int maxsize, entityframe5_database_t *d, int numstates, const entity_state_t *states, int viewentnum, int movesequence);
extern cvar_t developer_networkentities;
void VM_CustomStats_Clear (void);
void VM_SV_UpdateCustomStats (client_t *client, prvm_edict_t *ent, sizebuf_t *msg, int *stats);
-void EntityFrameCSQC_WriteFrame (sizebuf_t *msg, int numstates, const entity_state_t *states);
+void EntityFrameCSQC_WriteFrame (sizebuf_t *msg, int maxsize, int numstates, const entity_state_t *states);
cvar_t coop = {0, "coop","0", "coop mode, 0 = no coop, 1 = coop mode, multiple players playing through the singleplayer game (coop mode also shuts off deathmatch)"};
cvar_t deathmatch = {0, "deathmatch","0", "deathmatch mode, values depend on mod but typically 0 = no deathmatch, 1 = normal deathmatch with respawning weapons, 2 = weapons stay (players can only pick up new weapons)"};
sv.sententities[s->number] = sv.sententitiesmark;
}
-void SV_WriteEntitiesToClient(client_t *client, prvm_edict_t *clent, sizebuf_t *msg)
+void SV_WriteEntitiesToClient(client_t *client, prvm_edict_t *clent, sizebuf_t *msg, int maxsize)
{
int i, numsendstates;
entity_state_t *s;
prvm_edict_t *camera;
// if there isn't enough space to accomplish anything, skip it
- if (msg->cursize + 25 > msg->maxsize)
+ if (msg->cursize + 25 > maxsize)
return;
sv.writeentitiestoclient_msg = msg;
if (sv_cullentities_stats.integer)
Con_Printf("client \"%s\" entities: %d total, %d visible, %d culled by: %d pvs %d trace\n", client->name, sv.writeentitiestoclient_stats_totalentities, sv.writeentitiestoclient_stats_visibleentities, sv.writeentitiestoclient_stats_culled_pvs + sv.writeentitiestoclient_stats_culled_trace, sv.writeentitiestoclient_stats_culled_pvs, sv.writeentitiestoclient_stats_culled_trace);
- EntityFrameCSQC_WriteFrame(msg, numsendstates, sv.writeentitiestoclient_sendstates);
+ EntityFrameCSQC_WriteFrame(msg, maxsize, numsendstates, sv.writeentitiestoclient_sendstates);
if (client->entitydatabase5)
- EntityFrame5_WriteFrame(msg, client->entitydatabase5, numsendstates, sv.writeentitiestoclient_sendstates, client - svs.clients + 1, client->movesequence);
+ EntityFrame5_WriteFrame(msg, maxsize, client->entitydatabase5, numsendstates, sv.writeentitiestoclient_sendstates, client - svs.clients + 1, client->movesequence);
else if (client->entitydatabase4)
{
- EntityFrame4_WriteFrame(msg, client->entitydatabase4, numsendstates, sv.writeentitiestoclient_sendstates);
+ EntityFrame4_WriteFrame(msg, maxsize, client->entitydatabase4, numsendstates, sv.writeentitiestoclient_sendstates);
Protocol_WriteStatsReliable();
}
else if (client->entitydatabase)
{
- EntityFrame_WriteFrame(msg, client->entitydatabase, numsendstates, sv.writeentitiestoclient_sendstates, client - svs.clients + 1);
+ EntityFrame_WriteFrame(msg, maxsize, client->entitydatabase, numsendstates, sv.writeentitiestoclient_sendstates, client - svs.clients + 1);
Protocol_WriteStatsReliable();
}
else
{
- EntityFrameQuake_WriteFrame(msg, numsendstates, sv.writeentitiestoclient_sendstates);
+ EntityFrameQuake_WriteFrame(msg, maxsize, numsendstates, sv.writeentitiestoclient_sendstates);
Protocol_WriteStatsReliable();
}
}
SZ_Clear(&sv.datagram);
}
-static void SV_WriteUnreliableMessages(client_t *client, sizebuf_t *msg)
+static void SV_WriteUnreliableMessages(client_t *client, sizebuf_t *msg, int maxsize)
{
// scan the splitpoints to find out how many we can fit in
int numsegments, j, split;
if (!client->unreliablemsg_splitpoints)
return;
- // always accept the first one if it's within 1400 bytes, this ensures
+ // always accept the first one if it's within 1024 bytes, this ensures
// that very big datagrams which are over the rate limit still get
// through, just to keep it working
- if (msg->cursize + client->unreliablemsg_splitpoint[0] > msg->maxsize && msg->maxsize < 1400)
+ j = msg->cursize + client->unreliablemsg_splitpoint[0];
+ if (maxsize < 1024 && j > maxsize && j <= 1024)
{
numsegments = 1;
- msg->maxsize = 1400;
+ maxsize = 1024;
}
else
for (numsegments = 0;numsegments < client->unreliablemsg_splitpoints;numsegments++)
- if (msg->cursize + client->unreliablemsg_splitpoint[numsegments] > msg->maxsize)
+ if (msg->cursize + client->unreliablemsg_splitpoint[numsegments] > maxsize)
break;
if (numsegments > 0)
{
// note this discards ones that were accepted by the segments scan but
// can not fit, such as a really huge first one that will never ever
// fit in a packet...
- if (msg->cursize + split <= msg->maxsize)
+ if (msg->cursize + split <= maxsize)
SZ_Write(msg, client->unreliablemsg.data, split);
// remove the part we sent, keeping any remaining data
client->unreliablemsg.cursize -= split;
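A rough sketch (illustrative, not engine code) of the segment scan above: accept as many complete queued messages as fit under the soft limit, but let a single oversized first message through up to 1024 bytes so a huge effect cannot be stalled behind the rate limit forever.

	// splitpoint[i] is the end offset of queued message i in the unreliable queue;
	// the caller guarantees numsplitpoints > 0
	static int Example_CountSegments(int cursize, const int *splitpoint, int numsplitpoints, int *maxsize)
	{
		int numsegments;
		int first = cursize + splitpoint[0];
		// fallback: one message that misses the soft limit but still fits in 1024 bytes
		if (*maxsize < 1024 && first > *maxsize && first <= 1024)
		{
			*maxsize = 1024; // relax the limit for this packet only
			return 1;
		}
		// otherwise accept whole messages until the next one would not fit
		for (numsegments = 0; numsegments < numsplitpoints; numsegments++)
			if (cursize + splitpoint[numsegments] > *maxsize)
				break;
		return numsegments;
	}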
int stats[MAX_CL_STATS];
unsigned char sv_sendclientdatagram_buf[NET_MAXMESSAGE];
+ // obey rate limit by limiting packet frequency if the packet size
+ // limiting fails
+ // (usually this is caused by reliable messages)
+ if (!NetConn_CanSend(client->netconnection))
+ return;
+
// PROTOCOL_DARKPLACES5 and later support packet size limiting of updates
maxrate = max(NET_MINRATE, sv_maxrate.integer);
if (sv_maxrate.integer != maxrate)
Cvar_SetValueQuick(&sv_maxrate, maxrate);
+
// clientrate determines the 'cleartime' of a packet
// (how long to wait before sending another, based on this packet's size)
clientrate = bound(NET_MINRATE, client->rate, maxrate);
- if (LHNETADDRESS_GetAddressType(&host_client->netconnection->peeraddress) == LHNETADDRESSTYPE_LOOP && !sv_ratelimitlocalplayer.integer)
- {
- // for good singleplayer, send huge packets
- maxsize = sizeof(sv_sendclientdatagram_buf);
- maxsize2 = sizeof(sv_sendclientdatagram_buf);
- // never limit frequency in singleplayer
- clientrate = 1000000000;
- }
- else if (sv.protocol == PROTOCOL_QUAKE || sv.protocol == PROTOCOL_QUAKEDP || sv.protocol == PROTOCOL_NEHAHRAMOVIE || sv.protocol == PROTOCOL_NEHAHRABJP || sv.protocol == PROTOCOL_NEHAHRABJP2 || sv.protocol == PROTOCOL_NEHAHRABJP3 || sv.protocol == PROTOCOL_QUAKEWORLD)
+ switch (sv.protocol)
{
+ case PROTOCOL_QUAKE:
+ case PROTOCOL_QUAKEDP:
+ case PROTOCOL_NEHAHRAMOVIE:
+ case PROTOCOL_NEHAHRABJP:
+ case PROTOCOL_NEHAHRABJP2:
+ case PROTOCOL_NEHAHRABJP3:
+ case PROTOCOL_QUAKEWORLD:
// no packet size limit support on Quake protocols because it just
// causes missing entities/effects
// packets are simply sent less often to obey the rate limit
maxsize = 1024;
maxsize2 = 1024;
- }
- else if (sv.protocol == PROTOCOL_DARKPLACES1 || sv.protocol == PROTOCOL_DARKPLACES2 || sv.protocol == PROTOCOL_DARKPLACES3 || sv.protocol == PROTOCOL_DARKPLACES4)
- {
+ break;
+ case PROTOCOL_DARKPLACES1:
+ case PROTOCOL_DARKPLACES2:
+ case PROTOCOL_DARKPLACES3:
+ case PROTOCOL_DARKPLACES4:
// no packet size limit support on DP1-4 protocols because they kick
// the client off if they overflow, and miss effects
// packets are simply sent less often to obey the rate limit
maxsize = sizeof(sv_sendclientdatagram_buf);
maxsize2 = sizeof(sv_sendclientdatagram_buf);
- }
- else
- {
+ break;
+ default:
// DP5 and later protocols support packet size limiting which is a
// better method than limiting packet frequency as QW does
//
// mods that use csqc (they are likely to use less bandwidth anyway)
if (sv.csqc_progsize > 0)
maxsize = maxsize2;
+ break;
}
- // obey rate limit by limiting packet frequency if the packet size
- // limiting fails
- // (usually this is caused by reliable messages)
- if (!NetConn_CanSend(client->netconnection))
- return;
+ if (LHNETADDRESS_GetAddressType(&host_client->netconnection->peeraddress) == LHNETADDRESSTYPE_LOOP && !sv_ratelimitlocalplayer.integer)
+ {
+ // for good singleplayer, send huge packets
+ maxsize = sizeof(sv_sendclientdatagram_buf);
+ maxsize2 = sizeof(sv_sendclientdatagram_buf);
+ // never limit frequency in singleplayer
+ clientrate = 1000000000;
+ }
// while downloading, limit entity updates to half the packet
// (any leftover space will be used for downloading)
maxsize /= 2;
msg.data = sv_sendclientdatagram_buf;
- msg.maxsize = maxsize;
+ msg.maxsize = sizeof(sv_sendclientdatagram_buf);
msg.cursize = 0;
msg.allowoverflow = false;
// add as many queued unreliable messages (effects) as we can fit
// limit effects to half of the remaining space
- msg.maxsize -= (msg.maxsize - msg.cursize) / 2;
if (client->unreliablemsg.cursize)
- SV_WriteUnreliableMessages (client, &msg);
-
- msg.maxsize = maxsize;
+ SV_WriteUnreliableMessages (client, &msg, (msg.cursize + maxsize) / 2);
// now write as many entities as we can fit, and also sends stats
- SV_WriteEntitiesToClient (client, client->edict, &msg);
+ SV_WriteEntitiesToClient (client, client->edict, &msg, maxsize);
}
else if (realtime > client->keepalivetime)
{
// the player isn't totally in the game yet
// send small keepalive messages if too much time has passed
// (may also be sending downloads)
- msg.maxsize = maxsize2;
client->keepalivetime = realtime + 5;
MSG_WriteChar (&msg, svc_nop);
}
- msg.maxsize = maxsize2;
-
// if a download is active, see if there is room to fit some download data
// in this packet
- downloadsize = maxsize * 2 - msg.cursize - 7;
+ downloadsize = min(maxsize*2,maxsize2) - msg.cursize - 7;
if (host_client->download_file && host_client->download_started && downloadsize > 0)
{
fs_offset_t downloadstart;