@techreport{moncaster-conex-problem-00,
  number      = {draft-moncaster-conex-problem-00},
  type        = {Internet-Draft},
  institution = {Internet Engineering Task Force},
  publisher   = {Internet Engineering Task Force},
  note        = {Work in Progress},
  url         = {https://datatracker.ietf.org/doc/draft-moncaster-conex-problem/00/},
  author      = {T Moncaster and Anne-Louise Burness and Michael Menth and Joao Araujo and Steven Blake and Richard Woundy},
  title       = {{The Need for Congestion Exposure in the Internet}},
  pagetotal   = 22,
  year        = 2010,
  month       = mar,
  day         = 1,
  abstract    = {Today's Internet is a product of its history. TCP is the main transport protocol responsible for sharing out bandwidth and preventing a recurrence of congestion collapse while packet drop is the primary signal of congestion at bottlenecks. Since packet drop (and increased delay) impacts all their customers negatively, network operators would like to be able to distinguish between overly aggressive congestion control and a confluence of many low-bandwidth, low-impact flows. But they are unable to see the actual congestion signal and thus, they have to implement bandwidth and/or usage limits based on the only information they can see or measure (the contents of the packet headers and the rate of the traffic). Such measures don't solve the packet-drop problems effectively and are leading to calls for government regulation (which also won't solve the problem). We propose congestion exposure as a possible solution. This allows packets to carry an accurate prediction of the congestion they expect to cause downstream thus allowing it to be visible to ISPs and network operators. This memo sets out the motivations for congestion exposure and introduces a strawman protocol designed to achieve congestion exposure.},
}