diff --git a/sys-kernel/geos_one-sources/ChangeLog b/sys-kernel/geos_one-sources/ChangeLog index 61cb2ec3..8d794888 100644 --- a/sys-kernel/geos_one-sources/ChangeLog +++ b/sys-kernel/geos_one-sources/ChangeLog @@ -2,6 +2,20 @@ # Copyright 1999-2009 Gentoo Foundation; Distributed under the GPL v2 # $Header: $ + 29 Jan 2009; Mario Fetka + +files/linux-sabayon-2.6.28-ignore-mouse-interface-on-macbooks.patch, + +files/linux-2.6-defaults-fat-utf8.patch, +files/linux-2.6.27-lirc.patch, + +files/linux-2.6-x86-tune-generic.patch, + +files/enable-4k-stacks-default-2.6.24.patch, + +files/hz-432-kconfig-option.patch, +files/hz-864-kconfig-option.patch, + +files/1-bcm5974-headers.patch, + +files/2-bcm5974-quad-finger-tapping.patch, + +files/3-bcm5974-macbook5-support.patch, + +files/drm-modesetting-radeon.patch, +files/drm-next.patch, + +files/drm-nouveau.patch, geos_one-sources-2.6.28-r1.ebuild: + integrate alsa-driver-1.0.18a with xa3530 patch and other patches to + optimize running on XA3530 + 28 Jan 2009; Mario Fetka +files/alsa-driver-1.0.18a-xa3530.patch, +files/alsa-driver-1.0.19-input.patch, geos_one-sources-2.6.28-r1.ebuild: diff --git a/sys-kernel/geos_one-sources/Manifest b/sys-kernel/geos_one-sources/Manifest index 11159b27..4c8a708a 100644 --- a/sys-kernel/geos_one-sources/Manifest +++ b/sys-kernel/geos_one-sources/Manifest @@ -1,12 +1,22 @@ +AUX 1-bcm5974-headers.patch 4839 RMD160 83f5ca0a53ebe19aeaaa7d952da5c79187e563af SHA1 157ecb6994b5903cd5a4eccc8552cc2c62b7463d SHA256 33ffab37c533a3a75ac628209bf8ab70e89c3ad81fd4491f2c4051cb66ba1043 +AUX 2-bcm5974-quad-finger-tapping.patch 2014 RMD160 8ae24293dd608db76316ea0ebb90ce7e215df14a SHA1 a02b87b190916b193fe3b183e0fed86d34d863d6 SHA256 9ca9ddc1ffe2c9e23b74597479834c11fb98679aa258b61e1b025d3541f43c28 +AUX 3-bcm5974-macbook5-support.patch 4515 RMD160 e69c5aa44b925c76995b5a040327b2ed4b7acc79 SHA1 62a729437a289cd7474d202dbe3fac0607152a83 SHA256 c1f5027fa74591123ff38968aceb9df715a40922efc865ec2ae2b58b728c49cd AUX 
alsa-driver-1.0.18a-xa3530.patch 14509 RMD160 7b64bd5303d4e646957c1e49a53adfb012b3de8e SHA1 148a444560bb9473325147d44a6f813b084d4819 SHA256 6236e6e64d09284c13f16aaefb20e54e891d5b7f1d93cdb64cf5d6901f94761c AUX alsa-driver-1.0.19-input.patch 608 RMD160 be8e1f14e5a7ec33102a87dc2fd443a8b49ba329 SHA1 0d4f66a40ccebc034c859b3fd4ff34e436cf4d41 SHA256 414dc3663be345c42e34c3c44087e1b316e67eea6b7979c447effd39e05a8652 AUX colored-printk-2.6.26.patch 11265 RMD160 1e520168b8813754906513317f5c683dbec2b31b SHA1 d321509ccc286b29b8c0fac9ae18205705fea332 SHA256 e8afce0c43cd534a6f707dc9f8769d0ba03322efccd55e3dc3edc3375e44c4ba +AUX drm-modesetting-radeon.patch 1348501 RMD160 d89e04413554815f4a94f684a7f1e0f5a9e62b2b SHA1 39fb1193f0643798705786c85f3c90cdf28fd27a SHA256 a126a762c7df6e9bde0a65b500fefd2e8188b10cd1d15454668d91cbe1a38f7b +AUX drm-next.patch 643528 RMD160 3ec76231e1268821db9c18c11677b0661b1af7fc SHA1 848f637d414433f86b35983827e6465d24674499 SHA256 8f36e6418aa33c51af8b3567ca09a65024fcef5bcec73040a1bc87e5f71a5bf4 +AUX drm-nouveau.patch 546241 RMD160 a79f37c94fdb13b16ae939206477b781b1a235af SHA1 28f6a2e7ebd98b4c491c4cdb036a043b6c13bf0f SHA256 8977a121eb90b6f543b4408bcafd424fab5c55357ebefa192f5b1daed7b725f0 +AUX enable-4k-stacks-default-2.6.24.patch 350 RMD160 b6ea063adaa4ce4f97da967ce84a96d24bd8614c SHA1 799ac8293e84fb723630dcb28be986065dba131d SHA256 fd2f9a3f42ae0512e84ea5979ab8359872b9a0520de0e485ca3846dc84ea0da2 AUX fix-reiser4-for-2.6.28.1.patch 396 RMD160 fc8874e25e2e66d58a49dd6775e483feab8b36e4 SHA1 97f49e0d228bdb590493cb4f07650ab0525133af SHA256 3b2c79070f580003b4fc5229999aff2f871a31997ca6fd65c9688bcb1cb05d0c AUX fix-unionfs-for-2.6.28.1.patch 407 RMD160 62a5c00e357faea5dd802dae40ceff46d2a3b7b2 SHA1 1b4d1c1eb151ea2d4b4d77de37ba05eb1291bad5 SHA256 0c7220bec0afdebf735fa8cd61ab39d72cebdfeb3e17ac3ee9cae5a03350d73e +AUX hz-432-kconfig-option.patch 835 RMD160 932004d8a7a6a1c5b7bbb20750d5a0e3afc9bc3d SHA1 9b7e85abcdac10f899e6b69123dd818e73e3a115 SHA256 
3d416e1fccf16d5fa8b27959a9273858c6bcc9edfb5e064fcc1c4492d9b6990a +AUX hz-864-kconfig-option.patch 793 RMD160 bbb6e93838c95dd62b4597b5cec2e3107502c225 SHA1 1182dcd468a5c07e73e611db2ec07f15b81505b4 SHA256 3669964da54cab30579264d129364c04cdf842442f3a190c1f4bcc6d2dea8d74 AUX intel-hda-a3530 13174 RMD160 e9fd8a6ffa3689e68edce74733e76378c467ba72 SHA1 5656993837d657271b0001d8bd4d5047d854713e SHA256 b4543cae20203bbd8d144d92f4ef48348c29bf28b44a9de9eece15e05959979f AUX linux-2.6-defaults-fat-utf8.patch 538 RMD160 4bd6376084e1c2946ab4efe62ee0acddfde9320f SHA1 ddacdab00dc7e2b3f911152bbbd78d80e3dc3305 SHA256 a27450c6b8ee032fe5442c3637272856552ec28bd83fac299d3250ab9e95576f AUX linux-2.6-x86-tune-generic.patch 661 RMD160 86b4ee5a704717cd1a6a47e19cd7941e3d4edd64 SHA1 310ef3fc420d33a72aefb440294e96c63e5042d3 SHA256 0d0b58175241cb519930158766a9b8b92f87155fbce8e7f513b198a848a78916 AUX linux-2.6.27-lirc.patch 392002 RMD160 1fbfb00708859a2cb9b1b58a6db438b9d2f2d2a4 SHA1 5915ab107a468bed6b4b9a7206ec8a8e87f4c073 SHA256 bc13af0a28f10233e4461fa118c511c161740cc303f59a5cb9c53fabb93b6fd1 +AUX linux-sabayon-2.6.28-ignore-mouse-interface-on-macbooks.patch 1446 RMD160 2673b06b8f88a88f8da3e8153e630e52accc149e SHA1 0741eb958644769f4388ec9200b7d25196af94b4 SHA256 9c88c82cd1cda89752fc603362f52f3deea794f88aff8af1c6d082ca63d6b247 AUX squashfs-2.6.28 219 RMD160 c1a3ae8e02643051f171500928005ca85d9d83cf SHA1 cf541d09ec2a8fe77525ebb91dd8e73060a4741a SHA256 a3b608a493ad09c983a5a697e05156697d6875a146690b8cda142854c298a9b1 DIST 2.6.27-AA2.3.tgz 96714 RMD160 ad5ed3f35f15fc10ee45020dd7a80926ef9201c8 SHA1 faf2a096b58a704d33a9a633e8e7ced0859cce26 SHA256 c94064437e6655beb9fd399fdbc443fd8a8db4acd3328ae04f23faf834d82e2e DIST 2.6.27.4-ext4.tar.gz 167096 RMD160 943c56a52034f0423a3517c97a2ab0faac18b6b8 SHA1 3b83be52ef9c11915e3a07a79f7cc06c2722e90b SHA256 ec041900086ca6e3a2e2b3f4ae1ae2fcfbdca6dbe9b3484e5fb16ba7e0252f62 @@ -44,7 +54,7 @@ EBUILD geos_one-sources-2.6.27-r4.ebuild 3191 RMD160 310291b7ef6d9015128e5cc04e2 
EBUILD geos_one-sources-2.6.27-r5.ebuild 3268 RMD160 fe08f99491a001054194a78df6aa3f89d23e41f0 SHA1 4c14a4f4765a82eebaa29433bcdf189ee342b1dd SHA256 48dbf7e70fd3caa2b99272173afc94119298278f14a7c3bac7bb93c29437e233 EBUILD geos_one-sources-2.6.27-r7.ebuild 2924 RMD160 425863b77e2fc04cca9051061dcdc9c34588ce4f SHA1 f62de54668415fd45d880a1fccd9702c4d0db9f2 SHA256 03a34dd1bae6de85243ca1c3e2bbb494a59ee11042f6d443c87513c42d8eb58b EBUILD geos_one-sources-2.6.27.ebuild 2821 RMD160 8cddc77851562dcdd261fdda1a98b416a8539cdb SHA1 87f501974802d83ef142114c01c1b7c9dea27a59 SHA256 20cb29a2fcc71d86f85232d092a1339f02f13ac042c570af632d026bad024d54 -EBUILD geos_one-sources-2.6.28-r1.ebuild 3668 RMD160 b3fe95397a120ad4ea81ca97d6cb25b20349325b SHA1 0d37613ec214e09f326805d20f2806edcc61f099 SHA256 0f225349c5032bd3d9b1bbbf3a639be0a0e82968099ea916178f1416d6b0b428 +EBUILD geos_one-sources-2.6.28-r1.ebuild 4178 RMD160 9a0334be5c2e2c9453ae8f2c9a585f0178e238c6 SHA1 cdedcf7bcbfceed568afe1af2f8f8ba01ff02061 SHA256 649853331ba5cfc28917e0ec4c572fc9d19b74f44ca88e271ac378085596f092 EBUILD geos_one-sources-2.6.28.ebuild 2724 RMD160 59ef67d038f90f54f7ad0224263fe3b8309db45d SHA1 f1e32cf7640706c4843679339d1774bbf89bed45 SHA256 dcb53c7bdbe81d51d74cc5f82e6efbade931fd33e962fd37cf71e7800d5d257c -MISC ChangeLog 4281 RMD160 6b2c8c60d345741322297c1409367aa7b751bf4a SHA1 f5a5e28393e1f7d64f53f408ceac7135105eed31 SHA256 74a78b8d4e5dc817602c00b65ffe3845cd5cb77747fdba72cfd69a4a4a04952c +MISC ChangeLog 4994 RMD160 168c1c4cc24293788754c7195f490dbceae18dcd SHA1 2c0372e7b51541f5c470267294f374fedc0823fc SHA256 a0f75a4463611d30f0e661c8356d4da3e610c7d20f57ee3a2eaa2c4819d4eb56 MISC metadata.xml 413 RMD160 7e4e48d47e48804072f4ce93e1948fda82433a90 SHA1 cd61cf36ba79f6635ab7b7957f31609ecf52cc90 SHA256 74fe6520890eaf800a1759f3e4f772fd0e433e5de9cf775a50c7af95b199518b diff --git a/sys-kernel/geos_one-sources/files/1-bcm5974-headers.patch b/sys-kernel/geos_one-sources/files/1-bcm5974-headers.patch new file mode 100644 index 
00000000..07116fa2 --- /dev/null +++ b/sys-kernel/geos_one-sources/files/1-bcm5974-headers.patch @@ -0,0 +1,126 @@ +The new unibody Macbooks are equipped with an integrated button and +trackpad. The package header of the trackpad interface has changed to +also contain information about the integrated button. This patch +performs the necessary preparations to allow for the new package +header. + +Signed-off-by: Henrik Rydberg +--- + drivers/input/mouse/bcm5974.c | 42 ++++++++++++++++++++++------------------ + 1 files changed, 23 insertions(+), 19 deletions(-) +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c +index 2998a6a..e97462e 100644 +--- a/drivers/input/mouse/bcm5974.c ++++ b/drivers/input/mouse/bcm5974.c +@@ -96,13 +96,14 @@ struct bt_data { + u8 rel_y; /* relative y coordinate */ + }; + +-/* trackpad header structure */ +-struct tp_header { +- u8 unknown1[16]; /* constants, timers, etc */ +- u8 fingers; /* number of fingers on trackpad */ +- u8 unknown2[9]; /* constants, timers, etc */ ++/* trackpad header types */ ++enum tp_type { ++ TYPE1 /* plain trackpad */ + }; + ++/* trackpad finger data offsets */ ++#define FINGER_TYPE1 26 ++ + /* trackpad finger structure */ + struct tp_finger { + __le16 origin; /* zero when switching track finger */ +@@ -119,11 +120,9 @@ struct tp_finger { + __le16 multi; /* one finger: varies, more fingers: constant */ + }; + +-/* trackpad data structure, empirically at least ten fingers */ +-struct tp_data { +- struct tp_header header; +- struct tp_finger finger[16]; +-}; ++/* trackpad finger data size, empirically at least ten fingers */ ++#define SIZEOF_FINGER sizeof(struct tp_finger) ++#define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER) + + /* device-specific parameters */ + struct bcm5974_param { +@@ -139,6 +138,8 @@ struct bcm5974_config { + int bt_ep; /* the endpoint of the button interface */ + int bt_datalen; /* data length of the button interface */ + int tp_ep; /* the endpoint of the trackpad 
interface */ ++ enum tp_type tp_type; /* type of trackpad interface */ ++ int tp_offset; /* offset to trackpad finger data */ + int tp_datalen; /* data length of the trackpad interface */ + struct bcm5974_param p; /* finger pressure limits */ + struct bcm5974_param w; /* finger width limits */ +@@ -158,7 +159,7 @@ struct bcm5974 { + struct urb *bt_urb; /* button usb request block */ + struct bt_data *bt_data; /* button transferred data */ + struct urb *tp_urb; /* trackpad usb request block */ +- struct tp_data *tp_data; /* trackpad transferred data */ ++ u8 *tp_data; /* trackpad transferred data */ + int fingers; /* number of fingers on trackpad */ + }; + +@@ -184,7 +185,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { + USB_DEVICE_ID_APPLE_WELLSPRING_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING_JIS, + 0x84, sizeof(struct bt_data), +- 0x81, sizeof(struct tp_data), ++ 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4824, 5342 }, +@@ -195,7 +196,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { + USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, + 0x84, sizeof(struct bt_data), +- 0x81, sizeof(struct tp_data), ++ 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4824, 4824 }, +@@ -276,18 +277,20 @@ static int report_bt_state(struct bcm5974 *dev, int size) + static int report_tp_state(struct bcm5974 *dev, int size) + { + const struct bcm5974_config *c = &dev->cfg; +- const struct tp_finger *f = dev->tp_data->finger; ++ const struct tp_finger *f; + struct input_dev *input = dev->input; +- const int fingers = (size - 26) / 28; +- int raw_p, raw_w, raw_x, raw_y; ++ int raw_p, raw_w, raw_x, raw_y, raw_n; + int ptest = 0, origin = 
0, nmin = 0, nmax = 0; + int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0; + +- if (size < 26 || (size - 26) % 28 != 0) ++ if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) + return -EIO; + ++ f = (const struct tp_finger *)(dev->tp_data + c->tp_offset); ++ raw_n = (size - c->tp_offset) / SIZEOF_FINGER; ++ + /* always track the first finger; when detached, start over */ +- if (fingers) { ++ if (raw_n) { + raw_p = raw2int(f->force_major); + raw_w = raw2int(f->size_major); + raw_x = raw2int(f->abs_x); +@@ -307,12 +310,13 @@ static int report_tp_state(struct bcm5974 *dev, int size) + abs_w = int2bound(&c->w, raw_w); + abs_x = int2bound(&c->x, raw_x - c->x.devmin); + abs_y = int2bound(&c->y, c->y.devmax - raw_y); +- for (; f != dev->tp_data->finger + fingers; f++) { ++ while (raw_n--) { + ptest = int2bound(&c->p, raw2int(f->force_major)); + if (ptest > PRESSURE_LOW) + nmax++; + if (ptest > PRESSURE_HIGH) + nmin++; ++ f++; + } + } + diff --git a/sys-kernel/geos_one-sources/files/2-bcm5974-quad-finger-tapping.patch b/sys-kernel/geos_one-sources/files/2-bcm5974-quad-finger-tapping.patch new file mode 100644 index 00000000..542f37a9 --- /dev/null +++ b/sys-kernel/geos_one-sources/files/2-bcm5974-quad-finger-tapping.patch @@ -0,0 +1,46 @@ +The integrated button on the new unibody Macbooks presents a need to +report explicit four-finger actions. Evidently, the finger pressing +the button is also touching the trackpad, so in order to fully support +three-finger actions, the driver must be able to report four-finger +actions. This patch adds a new button, BTN_TOOL_QUADTAP, which +achieves this. 
+ +Signed-off-by: Henrik Rydberg +--- + drivers/input/mouse/bcm5974.c | 4 +++- + include/linux/input.h | 1 + + 2 files changed, 4 insertions(+), 1 deletions(-) +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c +index e97462e..a170574 100644 +--- a/drivers/input/mouse/bcm5974.c ++++ b/drivers/input/mouse/bcm5974.c +@@ -258,6 +258,7 @@ static void setup_events_to_report(struct input_dev *input_dev, + __set_bit(BTN_TOOL_FINGER, input_dev->keybit); + __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); + __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); ++ __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); + __set_bit(BTN_LEFT, input_dev->keybit); + } + +@@ -328,7 +329,8 @@ static int report_tp_state(struct bcm5974 *dev, int size) + input_report_key(input, BTN_TOUCH, dev->fingers > 0); + input_report_key(input, BTN_TOOL_FINGER, dev->fingers == 1); + input_report_key(input, BTN_TOOL_DOUBLETAP, dev->fingers == 2); +- input_report_key(input, BTN_TOOL_TRIPLETAP, dev->fingers > 2); ++ input_report_key(input, BTN_TOOL_TRIPLETAP, dev->fingers == 3); ++ input_report_key(input, BTN_TOOL_QUADTAP, dev->fingers > 3); + + input_report_abs(input, ABS_PRESSURE, abs_p); + input_report_abs(input, ABS_TOOL_WIDTH, abs_w); +diff --git a/include/linux/input.h b/include/linux/input.h +index b86fb55..0082b24 100644 +--- a/include/linux/input.h ++++ b/include/linux/input.h +@@ -443,6 +443,7 @@ struct input_absinfo { + #define BTN_STYLUS2 0x14c + #define BTN_TOOL_DOUBLETAP 0x14d + #define BTN_TOOL_TRIPLETAP 0x14e ++#define BTN_TOOL_QUADTAP 0x14f /* Four fingers on trackpad */ + + #define BTN_WHEEL 0x150 + #define BTN_GEAR_DOWN 0x150 diff --git a/sys-kernel/geos_one-sources/files/3-bcm5974-macbook5-support.patch b/sys-kernel/geos_one-sources/files/3-bcm5974-macbook5-support.patch new file mode 100644 index 00000000..454fec87 --- /dev/null +++ b/sys-kernel/geos_one-sources/files/3-bcm5974-macbook5-support.patch @@ -0,0 +1,129 @@ +This patch adds support for the new unibody 
Macbook, with physically +integrated button and trackpad. A new button, BTN_TOOL_PRESS, is +introduced to allow for non-trivial handling of this device in user +space. + +Signed-off-by: Henrik Rydberg +Tested-by: David M. Lary +--- + drivers/input/mouse/bcm5974.c | 42 +++++++++++++++++++++++++++++++++++++++- + include/linux/input.h | 1 + + 2 files changed, 41 insertions(+), 2 deletions(-) +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c +index a170574..f85a296 100644 +--- a/drivers/input/mouse/bcm5974.c ++++ b/drivers/input/mouse/bcm5974.c +@@ -51,6 +51,10 @@ + #define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 + #define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 + #define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 ++/* Macbook5,1 (unibody), aka wellspring3 */ ++#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 ++#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 ++#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 + + #define BCM5974_DEVICE(prod) { \ + .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ +@@ -72,6 +76,10 @@ static const struct usb_device_id bcm5974_table[] = { + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_JIS), ++ /* Macbook5,1 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI), ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO), ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), + /* Terminating entry */ + {} + }; +@@ -98,11 +106,19 @@ struct bt_data { + + /* trackpad header types */ + enum tp_type { +- TYPE1 /* plain trackpad */ ++ TYPE1, /* plain trackpad */ ++ TYPE2 /* button integrated in trackpad */ + }; + + /* trackpad finger data offsets */ + #define FINGER_TYPE1 26 ++#define FINGER_TYPE2 30 ++ ++/* trackpad button data offsets */ ++#define BUTTON_TYPE2 15 ++ ++/* integrated button capability by configuration */ ++#define HAS_INTEGRATED_BUTTON(c) (c->tp_type == TYPE2) + + /* trackpad 
finger structure */ + struct tp_finger { +@@ -202,6 +218,17 @@ static const struct bcm5974_config bcm5974_config_table[] = { + { DIM_X, DIM_X / SN_COORD, -4824, 4824 }, + { DIM_Y, DIM_Y / SN_COORD, -172, 4290 } + }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI, ++ USB_DEVICE_ID_APPLE_WELLSPRING3_ISO, ++ USB_DEVICE_ID_APPLE_WELLSPRING3_JIS, ++ 0x84, sizeof(struct bt_data), ++ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, ++ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, ++ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, ++ { DIM_X, DIM_X / SN_COORD, -4460, 5166 }, ++ { DIM_Y, DIM_Y / SN_COORD, -75, 6700 } ++ }, + {} + }; + +@@ -259,6 +286,8 @@ static void setup_events_to_report(struct input_dev *input_dev, + __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); + __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); + __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); ++ if (HAS_INTEGRATED_BUTTON(cfg)) ++ __set_bit(BTN_TOOL_PRESS, input_dev->keybit); + __set_bit(BTN_LEFT, input_dev->keybit); + } + +@@ -281,7 +310,7 @@ static int report_tp_state(struct bcm5974 *dev, int size) + const struct tp_finger *f; + struct input_dev *input = dev->input; + int raw_p, raw_w, raw_x, raw_y, raw_n; +- int ptest = 0, origin = 0, nmin = 0, nmax = 0; ++ int ptest = 0, origin = 0, ibt = 0, nmin = 0, nmax = 0; + int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0; + + if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) +@@ -303,6 +332,10 @@ static int report_tp_state(struct bcm5974 *dev, int size) + + ptest = int2bound(&c->p, raw_p); + origin = raw2int(f->origin); ++ ++ /* set the integrated button if applicable */ ++ if (c->tp_type == TYPE2) ++ ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); + } + + /* while tracking finger still valid, count all fingers */ +@@ -346,6 +379,11 @@ static int report_tp_state(struct bcm5974 *dev, int size) + + } + ++ if (HAS_INTEGRATED_BUTTON(c)) { ++ input_report_key(input, BTN_TOOL_PRESS, ibt); ++ input_report_key(input, BTN_LEFT, 
ibt); ++ } ++ + input_sync(input); + + return 0; +diff --git a/include/linux/input.h b/include/linux/input.h +index 0082b24..6afa6b3 100644 +--- a/include/linux/input.h ++++ b/include/linux/input.h +@@ -438,6 +438,7 @@ struct input_absinfo { + #define BTN_TOOL_FINGER 0x145 + #define BTN_TOOL_MOUSE 0x146 + #define BTN_TOOL_LENS 0x147 ++#define BTN_TOOL_PRESS 0x148 /* The trackpad is a physical button */ + #define BTN_TOUCH 0x14a + #define BTN_STYLUS 0x14b + #define BTN_STYLUS2 0x14c diff --git a/sys-kernel/geos_one-sources/files/drm-modesetting-radeon.patch b/sys-kernel/geos_one-sources/files/drm-modesetting-radeon.patch new file mode 100644 index 00000000..faf39e31 --- /dev/null +++ b/sys-kernel/geos_one-sources/files/drm-modesetting-radeon.patch @@ -0,0 +1,36236 @@ +commit 3ef2b914f9667c70a9711fa31ff95d639ccc8e2e +Author: Dave Airlie +Date: Thu Dec 18 23:01:12 2008 +1000 + + drm: don't set cached bits on pages from uncached allocator. + + This should make AGP no corrupt itself to all buggery + (cherry picked from commit 55c0a3f1d3693cb5c3b65f37d4bb6e8a3c025460) + +commit be5f7447cc492545e225170be38e5e65d7de37d9 +Author: Dave Airlie +Date: Tue Dec 2 16:38:04 2008 +1000 + + drop old CS + +commit 91bfed87127c1bd17098803184e43783d55f35e5 +Author: Dave Airlie +Date: Mon Dec 1 15:09:27 2008 +1000 + + radeon: update with latest CS bits + +commit a7f6175e3610651b1f84a1b8aa1f61a9a4f3bdb6 +Author: Dave Airlie +Date: Wed Dec 17 06:56:36 2008 +1000 + + fixups post rebase + +commit cd29f247278a5949956adbfb75229dc9bad19f8e +Author: Dave Airlie +Date: Thu Dec 11 01:35:09 2008 +1000 + + radeon: fix agp mode when kms isn't enabled + +commit 0d0f9c970263750a68131b5f88735c59f605c69f +Author: Dave Airlie +Date: Wed Dec 3 17:22:07 2008 +1100 + + radeon: add support for accessing disabled ROMs. 
+ +commit 840d823d6f33c1852b92791093b23b1ce7af25ac +Author: Dave Airlie +Date: Wed Dec 3 17:19:02 2008 +1100 + + radeon: make cail do pll read/write via actual PLL functions + +commit 049d865d47cf229234cd7dd0ef8927a41a142b0e +Author: Dave Airlie +Date: Wed Dec 3 16:36:00 2008 +1100 + + radeon: atom fixes for endianness + +commit df08dc63cbbd19a8ea0659bdc379147a59201b4c +Author: Dave Airlie +Date: Wed Dec 3 20:28:05 2008 +1000 + + radeon: AGP fixes round 2. + + More I can't believe its not AGP fixes. + Limit the AGP aperture to the GART size, and init the registers + in the right place + +commit 79d6ac96108c29dbce90ce605b01dd6b71673092 +Author: Dave Airlie +Date: Tue Dec 2 12:49:17 2008 +1000 + + radeon: AGP fixes + + 1. Proper PCIE fallback on PCIE cards. + 2. Setup agp base + location regs properly + +commit e6a1353d11f485c99371e0b89c669da0c23e8af4 +Author: Michal Schmidt +Date: Tue Dec 2 08:06:59 2008 +1000 + + radeon: fix IGP GART calcs + + fedora bz 473895 + +commit d879f36aca29ece7c3bfada452266e82df3f2392 +Author: Dave Airlie +Date: Thu Nov 27 17:14:40 2008 +1000 + + radeon: post any GPUs that aren't posted + +commit d04c4750aba2b0bd306ab36a0002f2609ed6cc9a +Author: Dave Airlie +Date: Mon Nov 24 15:18:31 2008 +1000 + + radeon: enable larger GART size on PCIE/IGP + + Allocate a larger GART on PCIE and IGP chipsets controlled by the gart_size + command line option. + + Default to 512MB gart. + +commit ae7c33fa98bf4921292f6dffb6e60dd2b85cddfa +Author: Dave Airlie +Date: Mon Nov 24 15:17:49 2008 +1000 + + radeon: add some missing feature checks for modesetting + +commit 54fd140cc8c63d5c65fbcdf21259025fd8a431e8 +Author: Dave Airlie +Date: Mon Nov 24 11:41:06 2008 +1100 + + radeon: make rs480/rs690 gart invalidate work properly. + + we really need to read back to make sure the invalidate has happened + before continuing along happily in life. 
+ +commit 5e418cef1bfca7e8e0821d6e44a1e1a78e015b17 +Author: Dave Airlie +Date: Sun Nov 23 19:19:02 2008 +1000 + + drm: we have discardable now so don't do special cases + +commit 87e2de7aef77b96a8be3532358b6ceb9540d622a +Author: Dave Airlie +Date: Fri Nov 21 14:52:25 2008 +1000 + + atom: fix bug in parser for MC reads + +commit 52eccc11083a1c0636337520187a4e2cf9bfe989 +Author: Dave Airlie +Date: Mon Nov 17 18:14:28 2008 +1000 + + radeon: turn of VRAM zeroing by default for now - needs work + +commit 7a32efbc88ed749c0cd44a95bb4888cd2bc806c0 +Author: Dave Airlie +Date: Mon Nov 17 09:03:36 2008 +1000 + + radeon: fix return value for no relocs + +commit 5a47b4d8a97949288ebe8f2f3437637e405d1db8 +Author: Dave Airlie +Date: Mon Nov 17 09:03:01 2008 +1000 + + ttm: add discard for VRAM buffers + +commit 160794bbbb90088a86acc96ca5268bc46255582d +Author: Dave Airlie +Date: Fri Nov 14 15:52:32 2008 +1000 + + drm: fix the exit path of the bo unlocking + +commit 1ea46b5e3dc7e75afe1936594e0e085c9dc8665d +Author: Dave Airlie +Date: Fri Nov 14 15:51:44 2008 +1000 + + radeon: fixup relocate to work on locate write buffers in VRAM first. 
+ + Fixup failure paths and make EAGAIN work + +commit e16a76e302eba9b53858457054059985ec0ef9df +Author: Dave Airlie +Date: Thu Nov 13 17:22:26 2008 +1100 + + radeon: fix some issues since last rebase + +commit e9ab870ce368dab7ed9e859c157074913353b589 +Author: Dave Airlie +Date: Wed Nov 12 09:01:14 2008 +1000 + + radeon: fix more build + +commit 3b7c8b9cb7db0749540f9b042c7bf9e8e65f320d +Author: Dave Airlie +Date: Wed Nov 12 08:59:04 2008 +1000 + + radeon: fix compile + +commit 8455e44f0909b58b60465e7e8f84b77ec12710cc +Author: Dave Airlie +Date: Mon Nov 10 15:39:25 2008 +1000 + + radeon: fix dumbness in cp ring check + +commit f6654d03d800ea8e75bffdfad679cf8450c60361 +Author: Dave Airlie +Date: Mon Nov 10 14:26:11 2008 +1000 + + radeon: add gart useable size to report to userspace + +commit 0fb851b61a586a96a39d7f8725b12152c1fa1c27 +Author: Dave Airlie +Date: Sun Nov 9 20:34:49 2008 +1000 + + radeon: fix powerpc oops on rv280 + +commit b264e9bd0230865da5406eb991f9afdbedd49385 +Author: Dave Airlie +Date: Sun Nov 9 10:23:43 2008 +1000 + + radeon: upgrade atom headers + +commit b272406d53a36536d0344f3cf1b78322db97a137 +Author: Dave Airlie +Date: Sat Nov 8 14:39:41 2008 +1000 + + radeon: fix fence race condition hopefully + + For some reason reading the SCRATCH reg from RAM causes some race to occur. + Hopefully fix this. + +commit 48ebb844296d2b5ee892e679c482542f84add76d +Author: Dave Airlie +Date: Sat Nov 8 11:31:03 2008 +1000 + + drm/radeon: add dpms connector functions + +commit da66c52dd9140f17862e9d4cd55fbe03eb6793de +Author: Dave Airlie +Date: Fri Nov 7 16:22:22 2008 +1000 + + radeon: avivo cursors are across the full surface. 
+ + fixes cursor on second head + +commit 051c62dde66c86d90ccf186636a431820ef72b2e +Author: Dave Airlie +Date: Fri Nov 7 16:21:03 2008 +1000 + + modesetting: set the crtc x,y after the mode base change + +commit ffd84bdad116d3388aca75993b7994d8a2ba5b19 +Author: Dave Airlie +Date: Wed Nov 5 10:23:35 2008 +1000 + + drm/radeon: add uncached allocator to drm ttm code. + + This allows re-use of uc/wc marked pages + +commit ac5d2213565ad5176184a3637e16e9fd5dd817d3 +Author: Dave Airlie +Date: Wed Nov 5 10:22:27 2008 +1000 + + radeon: fix ring tail overflow issue since alignment + +commit f95041c9888a7e10529ffc5adea3c8847306e39f +Author: Dave Airlie +Date: Tue Nov 4 13:18:02 2008 +1000 + + radeon: disable HDP read cache for now + +commit f6ef4e052ed31c84c8ee11285a17dfa35c6c54e4 +Author: Dave Airlie +Date: Tue Nov 4 12:02:55 2008 +1000 + + radeon: force all ring writes to 16-dword alignment. + + Also set the fetch size to what tcore/fglrx uses. + +commit 5b928dc4e38f838d5d0c8e3ceb5dac821343df20 +Author: Dave Airlie +Date: Mon Nov 3 15:43:08 2008 +1000 + + radeon: fixup vram visible calculation to take a/c pinned objects for now + +commit 6b795f96d71a23a6c7107dc60ffadb485f4fb6b2 +Author: Dave Airlie +Date: Mon Nov 3 09:56:02 2008 +1100 + + radeon: if modesetting state is unknown make it known so pm-utils can use it + +commit 5370cda9cc412d326f8a91d813fbcdb57aa79bbb +Author: Dave Airlie +Date: Fri Oct 31 14:40:12 2008 +1000 + + radeon: fix ROP values for the paint ROP + +commit 2491b4f04209ffb64dfb4ef0a8a025aadf658a3f +Author: Dave Airlie +Date: Thu Oct 30 14:00:39 2008 +1000 + + radeon: add mtrr support for VRAM aperture. 
+ + This speeds things up a bit + +commit 890f6afd5f01bbdf283ca94c5c335b18fd219673 +Author: Dave Airlie +Date: Thu Oct 30 14:00:15 2008 +1000 + + radeon: disable AGP for certain chips if not specified until we figure it out + +commit acc5f117fb05ba34e4f89618a3a93b80cc99866b +Author: Dave Airlie +Date: Wed Oct 29 15:45:47 2008 +1000 + + radeon: disable debugging message + +commit fa5a846413486577b49552afea9c162560725666 +Author: Dave Airlie +Date: Wed Oct 29 15:37:32 2008 +1000 + + radeon: commit ring after emitting the buffer discards + +commit 82f3ab65ffece608653a51c3d54131aa77295853 +Author: Dave Airlie +Date: Wed Oct 29 17:39:27 2008 +1100 + + radeon: fix whitespace in encoders C file + +commit be6de6c167a860c54b9d0b867d17e4eb823382d9 +Author: Dave Airlie +Date: Wed Oct 29 17:39:08 2008 +1100 + + radeon: add more HDMI bits + +commit 09d8988dc61faec36416a3fb1ffe42051331e0e9 +Author: Dave Airlie +Date: Thu Oct 30 01:41:34 2008 +1000 + + radeon: set dma bufs bo type to a kernel type + +commit b3ee8fbc2b9886252b42372c4194aa46e8185745 +Author: Dave Airlie +Date: Thu Oct 30 01:05:48 2008 +1000 + + drm: reorder AGP unloading wrt driver unloading + +commit efb0c1a5e99cd047a8cbc414e42f669a387ddc97 +Author: Dave Airlie +Date: Wed Oct 29 15:46:16 2008 +1100 + + radeon: enable DVI-D + HDMI connectors. 
+ + This allows the rs690 to work on DVI + +commit 2d49e176172879617745fc8cf7573f05ec6255a1 +Author: Dave Airlie +Date: Tue Oct 28 20:33:13 2008 +1000 + + radeon: setup isync cntl properly + +commit a2afee6c7a16c779cab6228d18879049859f73cb +Author: Dave Airlie +Date: Tue Oct 28 20:31:27 2008 +1000 + + radeon: add more debugging + +commit c8f002393c164265f20e95e355a208d2fc547ea1 +Author: Dave Airlie +Date: Tue Oct 28 20:26:04 2008 +1000 + + radeon: overhaul ring interactions + + emit in 16-dword blocks, emit irqs at same time as everything else + +commit c2a07e846c21b52cce903ffa0110b6c1a131c033 +Author: Dave Airlie +Date: Tue Oct 28 16:49:09 2008 +1000 + + radeon: fix race in sysfs + +commit e44379ca283ec1badbc828f1960b7b1b7fa91fdc +Author: Dave Airlie +Date: Tue Oct 28 16:46:47 2008 +1000 + + radeon: add proc debugging for interrupts/ring + +commit 944b8472ec2cc27568b43c10d9dd304903d203da +Author: Dave Airlie +Date: Tue Oct 28 16:44:54 2008 +1000 + + radeon: only enable dynclks if asked for + +commit 2510851ce9b1ba12cd33b7da829fb6aa8a8ef778 +Author: Dave Airlie +Date: Tue Oct 28 06:35:10 2008 +1000 + + radeon: add wait rendering API + +commit a25906493affd49bd62063b662bc1df3de07e811 +Author: Dave Airlie +Date: Tue Oct 28 06:05:58 2008 +1000 + + radeon: rs480 fixes for bus mastering + +commit 87a2b0e85115e6074a9294f632146dd75296b1f3 +Author: Dave Airlie +Date: Mon Oct 27 18:30:15 2008 +1000 + + radeon: remove unused gem indirect ioctl + +commit a00b2dd57874c7b909a598a6121229d78fe4c178 +Author: Dave Airlie +Date: Mon Oct 27 16:41:09 2008 +1000 + + radeon: fix some warnings + +commit 177833fa51ab72fcd730df1af008db3839466401 +Author: Dave Airlie +Date: Mon Oct 27 16:40:34 2008 +1000 + + radeon: fix free after refcount + +commit cd5811b3bcc622ab250efdbd80f0319465db119f +Author: Dave Airlie +Date: Mon Oct 27 16:40:15 2008 +1000 + + radeon: CS2 make it all work with new relocs style + +commit 1b2982a1a7418b32f25ebfe11fb6390ed231aaa6 +Author: Dave Airlie +Date: Mon Oct 27 
16:39:12 2008 +1000 + + radeon: don't copy to user the cs ids + +commit 7185c835302f0c755874b0b257d9dadc475176a6 +Author: Dave Airlie +Date: Mon Oct 27 14:23:22 2008 +1000 + + radeon: make new CS2 command submission interface port older interface to this + +commit 3d542cabb417a8e30dd29646ff1018af4c223012 +Author: Dave Airlie +Date: Thu Oct 23 19:13:50 2008 +1000 + + radeon: release agp on module unload + +commit 34db80a441a96bf9ea7cbc90e5a14c73b49689c8 +Author: Dave Airlie +Date: Tue Oct 21 14:15:23 2008 +1000 + + radeon: add r423 bits to modesetting + +commit 7f5cf386d166ae4d0605da2991b207beb85bd805 +Author: Dave Airlie +Date: Tue Oct 21 14:12:38 2008 +1000 + + radeon: pull bus master enable into its own function + +commit 2b18550c4c09d8c2a619d19523115d20c2252446 +Author: Dave Airlie +Date: Mon Oct 20 14:44:23 2008 +1000 + + radeon: fix accessible VRAM sizing. + + We actually were passing accessible to userspace, but I thought + the code sized it correctly, however it doesn't seem to. + +commit 5a32cf2ab1059e1e94cc4297f5b9818ea8a2a7c3 +Author: Dave Airlie +Date: Mon Oct 20 13:41:05 2008 +1000 + + radeon: update proper chip family + +commit a3151e2871daf26580a74f9cdabffc31a404227b +Author: Dave Airlie +Date: Mon Oct 20 12:08:50 2008 +1000 + + radeon: fixup scratch register interactions properly + +commit 32fa43d71a24917aafbf6060581e76fb00faced2 +Author: Dave Airlie +Date: Mon Oct 20 10:27:50 2008 +1000 + + radeon: make writeback work again + +commit ddaf24277d4932b17a91f48eb87b60374dc16b44 +Author: Dave Airlie +Date: Mon Oct 20 09:20:23 2008 +1000 + + drm: cleanup some warnings + +commit eaf752a24253ede8cbcbbd9735bfdb0c94b3dac5 +Author: Dave Airlie +Date: Thu Oct 16 16:29:08 2008 +1000 + + radeon: fix small typo in agp code + +commit 5fad452a07cf297a32141283d7599333adcd7357 +Author: Dave Airlie +Date: Thu Oct 16 16:17:01 2008 +1000 + + radeon: workaround failure to parse some rs48x edid + +commit c83927675ff7ea640fba78135faa7a03f65e433a +Author: Dave Airlie +Date: 
Thu Oct 16 16:15:08 2008 +1000 + + radeon: don't enable dynclks on rs48x + +commit 2c45c5d0114efdbd05c72d6b9bf82f9a31554148 +Author: Dave Airlie +Date: Wed Oct 15 15:12:20 2008 +1000 + + radeon: allow r100/r200 modesetting to be forced on by users + +commit adbcbdbd2103fd544caf8e0c7a80a3a67dbe9323 +Author: Dave Airlie +Date: Thu Oct 16 22:06:00 2008 +1000 + + radeon: fix unused agp functionality + +commit ad5a0132e4fddcfaa686238c5c262064a631272c +Author: Dave Airlie +Date: Thu Oct 16 22:05:02 2008 +1000 + + radeon: add some more r100 support to test AGP + +commit a686504306d6dd17dc2b2d6b870de57fa37ccd89 +Author: airlied +Date: Wed Oct 15 23:58:03 2008 +1000 + + radeon: add initial agp support. + + This add agpmode command line option. + +commit fd5ddcda762eb709a9f521eb1f609f978d975eb8 +Author: airlied +Date: Wed Oct 15 23:57:21 2008 +1000 + + radeon: add CS support for r100/r200 in 2D driver + +commit 12919eae61647e151b9b21cab481a2bf2ca4579f +Author: Dave Airlie +Date: Thu Oct 9 16:37:23 2008 +1100 + + radeon: fixup interrupt suspend/resume + +commit 19436eeb911d0bd6b8a43f3d3dc35c10c3702af3 +Author: Dave Airlie +Date: Wed Oct 8 16:57:12 2008 +1000 + + radeon: fixup suspend/resume bus master enable + +commit a094d0b696c90e7570e9088b5884c4ffd2a6ff3c +Author: Dave Airlie +Date: Wed Oct 8 16:56:04 2008 +1000 + + radeon: re-enable hw blits for copying from VRAM + +commit 3372777c97b12b2ae43330348c91910d935deeef +Author: Dave Airlie +Date: Wed Oct 8 16:53:43 2008 +1000 + + radeon: fix buffer copying for VRAM->TT + +commit 7a3cf0dc97dca5e9d2c5af48769a96ee56f25161 +Author: Dave Airlie +Date: Wed Oct 8 16:51:58 2008 +1000 + + radeon: move memcpy until after CP is stopped + +commit 59badc3e2e707b5e6af78ddf35872d05e749cd80 +Author: Dave Airlie +Date: Tue Oct 7 16:34:12 2008 +1000 + + drm: remove stray debug code + +commit fd4bae063aa88a7e1d270774c4ac9677cce9ea5d +Author: Dave Airlie +Date: Tue Oct 7 16:31:22 2008 +1000 + + radeon: use discardable flags on no backing store 
objects + +commit a8dc915a657a81ea555b8448d54dae56041980fc +Author: Dave Airlie +Date: Tue Oct 7 16:30:09 2008 +1000 + + drm: add discardable flag. + + This discards memory contents on suspend/resume with the + hope the upper layers know something we don't. + +commit ab3dff65889deb568597a567055a01baa7d1a412 +Author: Dave Airlie +Date: Tue Oct 7 16:27:31 2008 +1000 + + drm/radeon: initial suspend/resume fix. + + This enables the evict code and also sets radeon up + to allow evict from VRAM to LOCAL + +commit b8179989154d5819f4a96b058a5c12be590af977 +Author: Dave Airlie +Date: Mon Oct 6 16:20:38 2008 +1000 + + radeon: fixup alignment between GEM and TTM + + fixup the interface between gem and ttm for alignment. makes Xv work better + +commit 7ada9a28eb20fa768330c17036cc5c9a2308a151 +Author: Dave Airlie +Date: Wed Oct 1 11:27:02 2008 +1000 + + radeon: fixup GEM pinned offset retrieval for mesa + +commit 3cbda5ddc1aa78d586ff3447e97809b2449ff199 +Author: Dave Airlie +Date: Wed Oct 1 11:25:54 2008 +1000 + + drm: fixup clean flag handling properly + +commit 05f32d7b440a62f801e9448e9ff4414fb18f4809 +Author: Alex Deucher +Date: Mon Sep 29 14:57:11 2008 +1000 + + radeon: use atom for ext tmds on r4xx + +commit ec07f7ea9580ecee93710d3eb5e6857eeedc20eb +Author: Alex Deucher +Date: Mon Sep 29 14:56:32 2008 +1000 + + radeon: make atom on r4xx a module option + + default is legacy modesetting. pass module option r4xx_atom + to try using atom on r4xx. 
+ +commit c1eccaae46f23ed1f435160d1dacc9362cbe1a45 +Author: Alex Deucher +Date: Mon Sep 29 14:55:16 2008 +1000 + + radeon: first pass at using atombios on r4xx hw + +commit ad15496283c36867d9f6fbada40ad276324e3ccd +Author: Dave Airlie +Date: Mon Sep 29 14:50:47 2008 +1000 + + radeon: add r600 modesetting registers writes + +commit 7698bdda7f463cb96996fad98190f8c710809ff4 +Author: Dave Airlie +Date: Mon Sep 29 14:45:11 2008 +1000 + + radeon: parse object tables for connectors on r600 + +commit f29b90ef222fd8d24973593f1e90c8af649df4d7 +Author: Dave Airlie +Date: Tue Sep 23 16:34:55 2008 +1000 + + radeon: fix minor cursor disappearing issues + +commit 87cdcf417c6edd7be1a260eccb39321a072a7021 +Author: Kristian Høgsberg +Date: Mon Sep 22 18:40:36 2008 -0400 + + radeon: Fix type in check for tmds type. + +commit e0cceb6c10a25d264c98711d88dc2e77568742f5 +Author: Kristian Høgsberg +Date: Mon Sep 22 12:00:57 2008 -0400 + + Store the buffer object backing the fb as a void pointer, not a handle. + + This lets us defer handle creation until userspace acutally asks for + one, at which point we also have a drm_file to associate it with. + +commit 6ee8ff0dc2b71c1ffe5e86db74bad8ff919efc56 +Author: Dave Airlie +Date: Sun Sep 21 09:48:51 2008 +1000 + + drm: fixups on top of rebase + +commit f1808c2414671023ebfbef59289cab7d22d7c15d +Author: Dave Airlie +Date: Sat Sep 20 03:08:05 2008 +1000 + + radeon: rmx_fixup() fixes for legacy chips + +commit 099cbd68de1e7fb821930f477c7dacad740d651b +Author: Dave Airlie +Date: Sat Sep 20 01:55:27 2008 +1000 + + radeon: fix combios + +commit 45e2f1c7c5d4c056bf8c664dab18accff332d4de +Author: Dave Airlie +Date: Fri Sep 19 09:17:36 2008 +1000 + + radeon: port Alexs patches from modesetting-gem + +commit 1e684c2536e3c901151fadb941b9d690b968ef36 +Author: Kristian Høgsberg +Date: Thu Sep 18 16:10:29 2008 -0400 + + radeon: Add DRM_ERROR() messages to all EINVAL exits from DRM_RADEON_CS. 
+ + Shouldn't trigger under normal use and when something breaks, it will + be easier to debug. + +commit b64ede60d8521e60f1cb76995dfb96bd979e45f9 +Author: Kristian Høgsberg +Date: Thu Sep 18 16:07:50 2008 -0400 + + Allow R300_DST_PIPE_CONFIG for R420 and up. + + The X server emits writes to R300_DST_PIPE_CONFIG for R420 chipsets during + accel init. + +commit 0e10c1a8581e899d45bd11b94b08c789e998995f +Author: Kristian Høgsberg +Date: Thu Sep 18 14:53:46 2008 -0400 + + radeon kms: Get precedence right when computing PLL values. + +commit 3317a09a8d25c5256b1650eb096ce3fa0c83b08b +Author: Alex Deucher +Date: Thu Sep 18 09:55:14 2008 +1000 + + radeon: further LVDS fixes + +commit bc802cff5c3d0cae9c25b1ac874d2532f4ae6c78 +Author: Alex Deucher +Date: Thu Sep 18 09:54:12 2008 +1000 + + radeon: legacy lvds updates + +commit 606d03adcfa9da0293893d2147517c490c57a480 +Author: Dave Airlie +Date: Thu Sep 11 18:26:27 2008 +1000 + + radeon: fixup buffer and cs + + just fallback around busted stuff for now + +commit fadffa52fda2982d4e3925b670e38e820848eea8 +Author: Dave Airlie +Date: Thu Sep 11 18:25:18 2008 +1000 + + radeon: fixup GEM domain setting - allows more userspace paths + + also dirty buffer on validate + +commit 7e2106c5f5220733025b045a73deaf9a23522494 +Author: Dave Airlie +Date: Thu Sep 11 17:13:32 2008 +1000 + + radeon: disable blit moves + +commit 6668bc9004c180c0207abbe81e3421b130846c69 +Author: Dave Airlie +Date: Thu Sep 11 16:18:27 2008 +1000 + + radeon: fail properly if we can't create the ring. 
+ + Normally this will be due to an AGP driver needing updating + +commit 7bbdc2685161c8b4ea4813cc6c8f2eb71fb37f74 +Author: Dave Airlie +Date: Wed Sep 10 14:35:43 2008 +1000 + + radeon: do proper memory controller init and setup + +commit 383e43f729777c6ec2975248b8592aef2f0f77fd +Author: Dave Airlie +Date: Wed Sep 10 14:35:08 2008 +1000 + + radeon: fix return value + +commit 967c273b26b3d0303971d57a79c9cd9a3783ed76 +Author: Dave Airlie +Date: Wed Sep 10 14:34:39 2008 +1000 + + radeon: fixup reference counting properly + +commit 5c9948d861b87f25a6b9b11f03cdc7f690a7c61a +Author: Dave Airlie +Date: Tue Sep 9 15:55:38 2008 +1000 + + radeon: sort out atom vs combios tables for r400 cards + +commit d8d06c4b245582e7e63d6cb05df0519ee4ae4dfa +Author: Dave Airlie +Date: Mon Sep 8 11:37:26 2008 +1000 + + radeon: remove unneeded debugging + +commit e8d344ab4644f9b4126472e4cd411715cb51c922 +Author: Dave Airlie +Date: Mon Sep 8 11:25:12 2008 +1000 + + radeon: don't do full edid for detection purposes + +commit b435e546014b0eb64a1a357c01fb6504e046818e +Author: Dave Airlie +Date: Mon Sep 8 10:27:48 2008 +1000 + + radeon: disable debugging that sneaky itself on + +commit 15e5a1302977f5e7e2bcf68e4315b4c0da3f8f6c +Author: Dave Airlie +Date: Mon Sep 8 10:20:44 2008 +1000 + + radeon: make text reserve 256k + +commit 4a732c876e68ba35c051a9b2b9b3f59af8aefb56 +Author: Dave Airlie +Date: Mon Sep 8 10:11:27 2008 +1000 + + radeon: don't disable VGA bits it breaks X later + +commit 6ba4fe118c71d81688aed0f6f29b541a8ec2aed5 +Author: Dave Airlie +Date: Fri Sep 5 16:12:59 2008 +1000 + + radeon: further suspend/resume support + +commit d3f08fa8d959ab86148522dd1d520765d0f70285 +Author: Dave Airlie +Date: Fri Aug 29 14:52:15 2008 +1000 + + radeon: add initial suspend/resume support + + this gets us back to fbcon.. 
its dirty like zebra + +commit 3095dc063ab426024bd79d661bb83d6001af3dfb +Author: Dave Airlie +Date: Fri Sep 5 11:15:03 2008 +1000 + + radeon: only enable KMS for radeon on x86 for now + +commit 07c2307f45955becc3085187951fd52cfa43a162 +Author: Dave Airlie +Date: Fri Sep 5 10:46:17 2008 +1000 + + radeon: rs690 GART tables need to be in uncached memory. + + Allocate the rs480/690 tables from uncached memory. + +commit 48b239ec875354114b1d1ab6052aaf995251af6a +Author: Dave Airlie +Date: Thu Sep 4 11:58:08 2008 +1000 + + drm/radeon: fixup some avivo/rs690 checks + +commit 886833770b27dc6c6c3c01264d8f06eb6490d4e8 +Author: Dave Airlie +Date: Fri Aug 29 09:59:02 2008 +1000 + + drm: move text mode check to driver. + + also change name from text to nomodeset + +commit 2ed5093e134819d82252c9acca3d30dea4358990 +Author: Dave Airlie +Date: Thu Aug 28 21:22:27 2008 +1000 + + radeon: fix up LVDS panel mode + + also don't explode on lack of DDC + +commit afc7519cbe2c4de0f5a0f5293f166302fe68c04d +Author: Dave Airlie +Date: Thu Aug 28 18:31:50 2008 +1000 + + drm: fix dev->master convert + +commit bfaf4eee5b30971c22ac713cdb27dd81ebd17cc8 +Author: Kristian Høgsberg +Date: Wed Aug 20 11:24:13 2008 -0400 + + drm: Set up a kernel side hw_lock, so userspace isn't required to do so. + + DRI2 doesn't use a user space lock, so don't require one to be set up. + Old DRI can still provide a lock containing map as before, which will + override the default kernel-side lock. 
+ + Signed-off-by: Kristian Høgsberg + +commit 1abd1b2c2bae090cba345f575b5d308f635946d4 +Author: Dave Airlie +Date: Thu Aug 28 16:38:49 2008 +1000 + + radeon: fix LVDS on atombios - typos + +commit 38ca2227d3ea2f1f94078f727a1382ac6c15f2dd +Author: Dave Airlie +Date: Thu Aug 28 12:01:53 2008 +1000 + + radeon: limit LVDS to first CRTC for now + +commit 0b57a130d6c0df930bba26613bbe7ecc168708f1 +Author: Dave Airlie +Date: Thu Aug 28 12:01:38 2008 +1000 + + radeon: fixup checks for crtc in dpms path + +commit 4b84ec7890a67782503a81e7de3f43beffe383bb +Author: Dave Airlie +Date: Wed Aug 27 15:39:17 2008 +1000 + + radeon: fix after rebase + +commit a2bd95d420b509484d4453a3de57863b9f85fd2c +Author: Dave Airlie +Date: Wed Aug 27 13:43:04 2008 +1000 + + radeon: avoid oops on encdoers with no crtc set + +commit 9f76faef605ac6fe88f21780d33507cdbc740149 +Author: Alex Deucher +Date: Tue Aug 26 17:23:21 2008 +1000 + + radeon: fix warning from radeon_legacy_state removal + +commit 8a8bc2a197e5f516eb161c4dfdac9be45cffeb37 +Author: Alex Deucher +Date: Tue Aug 26 17:22:37 2008 +1000 + + radeon: first pass at legacy dac detect + + - done: primary dac, vga on tvdac + - todo: ext dac, tv on tvdac + +commit 207d8690fe812bbd12aadc0344588d2b9ee4f50c +Author: Alex Deucher +Date: Tue Aug 26 17:22:11 2008 +1000 + + radeon: first pass at bios scratch regs + + - todo: updated connected status + +commit 86cd902421db3c9aefa618c6f42ebf759f35bd23 +Author: Alex Deucher +Date: Tue Aug 26 17:20:54 2008 +1000 + + radeon: remove unused legacy state + +commit 1012aa17d033f7461898b5f618095594415637d5 +Author: Alex Deucher +Date: Tue Aug 26 17:20:15 2008 +1000 + + radeon: get primary dac adj info from bios tables + +commit df67aa03dcb80f34a2e82f244ed319097f8ad5dd +Author: Dave Airlie +Date: Tue Aug 26 17:03:13 2008 +1000 + + x86: export pat_enabled + +commit 2671bf82bad64867cbcefaeadc3d873a5b58849c +Author: Dave Airlie +Date: Tue Aug 26 17:02:43 2008 +1000 + + drm: enable PAT and writecombining support. 
+ + If PAT is enabled, enable write combining support for kernel/user mappings + when pat is enabled. Also set memory to WC instead of uncached in ttm + +commit 1c6e9025d0a744fb2368c7e60e5959fbfd44a033 +Author: Dave Airlie +Date: Tue Aug 26 17:01:35 2008 +1000 + + radeon: add more domain support to GEM code. + + move domain validate function to separate function + call it from correct places + +commit 783c1755e1877bfb5d371d6aed9229e89193733a +Author: Dave Airlie +Date: Tue Aug 26 17:00:49 2008 +1000 + + radeon: implement zero fill for VRAM migration + + If a BO hasn't been dirtied, do a solid fill on VRAM instead of + migrating pages to VRAM + +commit 94b5bad6740b1c1bc91a884602fcae7556ac6f23 +Author: Dave Airlie +Date: Tue Aug 26 16:59:45 2008 +1000 + + drm: set clean flags in new flags so it doesn't disappear + +commit beeef46b78a3881d82010ccd2cf231f3b5558381 +Author: Dave Airlie +Date: Fri Aug 22 10:16:20 2008 +1000 + + radeon: wait for dma gui idle on 2D idle + +commit 83cfe866e83a632029194f82d08e3aa99eb43fc1 +Author: Dave Airlie +Date: Fri Aug 22 10:16:01 2008 +1000 + + radeon_gem: fix some misplace == + +commit b23d287bcaeefcd41360eb4bbff213b63b94417e +Author: Dave Airlie +Date: Fri Aug 22 10:13:25 2008 +1000 + + radeon: get buffer upload working + +commit 16f042a029f0c124881b54e3cdbc904f0102e63d +Author: Dave Airlie +Date: Fri Aug 22 10:12:57 2008 +1000 + + radeon: read back register between on gart flush + +commit e5f380ce6813b560f88734e5bc267cf7e0817df0 +Author: Dave Airlie +Date: Fri Aug 22 10:11:55 2008 +1000 + + drm: export buffer zeroing function for sw fallback + + We need a buffer zeroing function for before we have + accel running etc + +commit 1a329af605a7e97bf47d4d59c3fae48bd63bf48f +Author: Dave Airlie +Date: Fri Aug 22 09:49:16 2008 +1000 + + ttm: add clean bo flags + + This can be used to denote a bo hasn't been mapped or validated yet. 
+ + Primarily for the move code when we get a buffer in VRAM, we don't need + to copy the contents just zero them. + +commit c1f33b48e0ceeb1217bd85941699868fc7c1b0af +Author: Dave Airlie +Date: Fri Aug 22 09:39:07 2008 +1000 + + ati_pcigart: add memory barrier and volatile for table access + +commit 865842501d07b53fe7da767ec6867b37791e0bd5 +Author: Dave Airlie +Date: Fri Aug 22 09:35:26 2008 +1000 + + drm: remove tlb flush logic from ttm + +commit a2509107e6f3102d31f23b1230ed5e4323f2e9ae +Author: Dave Airlie +Date: Tue Aug 19 12:15:10 2008 +1000 + + radeon: fix bug in scratch retreival + +commit 5c2e9d64b983df7d4a8fcfec6d97d8108fafe8dd +Author: Dave Airlie +Date: Tue Aug 19 12:14:38 2008 +1000 + + radeon: add GTT domain + +commit b896c635377305d250cdb3d1b70ec8f624d8f97f +Author: Dave Airlie +Date: Wed Aug 27 14:58:05 2008 +1000 + + radeon: add missing regs from a previous rebase + +commit 8b4b5376e7de4edbef6d9e74b401053124d69780 +Author: Dave Airlie +Date: Thu Aug 14 18:14:56 2008 +1000 + + disable modeset on < r300 + +commit 2b5b245b4da6245cc31dfbf62de08f630e985484 +Author: Dave Airlie +Date: Fri Aug 15 09:36:21 2008 +1000 + + radeon: set the base after mode is programmed + +commit 598f58b76744598718f9866399a4864b4bacee94 +Author: Dave Airlie +Date: Fri Aug 15 09:35:55 2008 +1000 + + radeon: fix LVDS modes problem + +commit 58a4fee7d55860eb660c8eb605c36d79cbefc0f5 +Author: Dave Airlie +Date: Thu Aug 14 18:06:31 2008 +1000 + + radeon: reserve 64k of VRAM for now for text mode so we don't trample it + +commit 3da833e454e074f5d2205faeb794d032cd43a868 +Author: Dave Airlie +Date: Thu Aug 14 14:38:27 2008 +1000 + + radeon: add support for init memory map + + This gets RN50 to initialise correctly + +commit 8b5b7391d85945b77636b0cf892ac8600dc1c4d5 +Author: Dave Airlie +Date: Thu Aug 14 14:37:25 2008 +1000 + + radeon: add copy/solid regs for rn50 + +commit 7ae2d31976e715eaae3e2887905e32f5a3add66d +Author: Alex Deucher +Date: Thu Aug 14 09:59:47 2008 +1000 + + radeon: fill 
in and make use of more combios tables + +commit 049d18c8d1a6c845f178d271f63606d621579f86 +Author: Dave Airlie +Date: Thu Aug 14 09:59:31 2008 +1000 + + radeon: add quirks from DDX + +commit 4d68722e470b2583bf60a30e7c08b009cb170589 +Author: Alex Deucher +Date: Thu Aug 14 09:59:12 2008 +1000 + + radeon: fix warnings + +commit 9ee29ec1a5576b1f0773bd6be690bab3aef23784 +Author: Alex Deucher +Date: Thu Aug 14 09:58:47 2008 +1000 + + radeon: get legacy working + + - extra ~ in RADEON_WRITE_P() + - re-arrange crtc setup a bit + - add debugging for tracing calls + - fix pitch calculation + +commit b9671e487a21fa72743c1b7b89108664b1288441 +Author: Alex Deucher +Date: Thu Aug 14 09:58:24 2008 +1000 + + radeon: set base in legacy crtc mode set + +commit 648c78fb282cac9d91aff976fe1887b94e7a35d8 +Author: Alex Deucher +Date: Thu Aug 14 09:55:06 2008 +1000 + + radeon: Convert COM BIOS to table offset lookup function + +commit 5f8722541cd411058b9138e57fc3649cdafb2857 +Author: Alex Deucher +Date: Thu Aug 14 09:54:03 2008 +1000 + + radeon/cursor: Restructure cursor handling and add support for legacy cursors + +commit 3166da64d45c0f9b4cf1cafe263efabfc8684586 +Author: Alex Deucher +Date: Thu Aug 14 09:53:08 2008 +1000 + + radeon/atom: implement crtc lock + +commit 70a082a3b8c67fbe034c8d61fdaccce48ef4e4b8 +Author: Alex Deucher +Date: Thu Aug 14 09:50:15 2008 +1000 + + LUT updates + + - Add gamma set for legacy chips + - Add 16 bpp gamma set + +commit c8970aca145e810f0a53f71bc0faab9f17325468 +Author: Alex Deucher +Date: Thu Aug 14 09:49:55 2008 +1000 + + radeon: various cleanups + + - white space + - move i2c_lock to radeon_i2c.c + - enable tv dac on legacy + +commit cd98bdf8447781537345fde1007fda454b7f9b5b +Author: Alex Deucher +Date: Thu Aug 14 09:49:30 2008 +1000 + + radeon: Add legacy dac detect stubs + +commit 34620e4554a20a802239032180c95e61ac956995 +Author: Alex Deucher +Date: Thu Aug 14 09:49:12 2008 +1000 + + unify connector, i2c handling for atom and legacy + +commit 
22d018d75a21c44bdeb195a0a7a4f0b03639b980 +Author: Alex Deucher +Date: Thu Aug 14 09:48:50 2008 +1000 + + Brute force port of legacy crtc/encoder code + + - removed save/init/restore chain with set functions + +commit b2305e537574bd8174b4977db3061827dcf88fed +Author: Dave Airlie +Date: Thu Aug 14 08:54:04 2008 +1000 + + FEDORA: radeon set gart buffers start + +commit 8018f8d689c3dc6c64be92072b23865adf324792 +Author: Dave Airlie +Date: Thu Aug 14 08:52:41 2008 +1000 + + radeon: command submission remove debug + +commit 81085b832c2a768c1f8b6fdd454af11805c18521 +Author: Dave Airlie +Date: Thu Aug 14 08:51:00 2008 +1000 + + radeon: use mm_enabled variable to denote memory manager running + +commit 0582b606dc9f5dfb0718df3a8005edbd2264a16e +Author: Dave Airlie +Date: Tue Aug 12 12:19:05 2008 +1000 + + radeon: hack it up so we get front/back offsets + + in the correct place in the drm so buffer swaps work again + +commit 40e9e98e33b07535a17200062221702e850329e1 +Author: Dave Airlie +Date: Tue Aug 12 12:18:08 2008 +1000 + + radeon: add mm supported call for userspace + +commit 3910ad9e1c12b3466e191a0bbeb5c93f9322b5f2 +Author: Dave Airlie +Date: Tue Aug 12 09:40:18 2008 +1000 + + FEDORA: add old DMA buffers on top of GEM + +commit 003e5ec47e7df307101110e4b2814d76ba47e390 +Author: Dave Airlie +Date: Fri Aug 8 15:57:10 2008 +1000 + + radeon: add initial code to support legacy crtc/encoders. + + This adds the CRTC and PLL setting code, it doesn't work + yet but its all heading in the right direction. 
+ +commit b1bc6db550ab7903d733601970887426d575ec03 +Author: Dave Airlie +Date: Fri Aug 8 10:11:23 2008 +1000 + + radeon: add initial tmds parsing for legacy cards + +commit bb421d7a8349864f5b231550e79b387b2d11a31a +Author: Dave Airlie +Date: Wed Aug 6 16:01:22 2008 +1000 + + radeon: set new memmap on gem enable + +commit a0532d2b688d1c957a8516a4415f7ac881731034 +Author: Dave Airlie +Date: Wed Aug 6 15:51:02 2008 +1000 + + radeon/pci: fixup table when GEM allocates it + +commit d97c4d21905d332cc293d1118c84e690cedd6805 +Author: Dave Airlie +Date: Wed Aug 6 15:44:38 2008 +1000 + + radeon: set gart table size + +commit 37140845e116bfcb9b2af4722328c2f0598df386 +Author: Dave Airlie +Date: Wed Aug 6 15:44:18 2008 +1000 + + radeon: fix buffer evict slection + +commit d7a07c9ea90b6392f6b89e46656e247e59d2fbf0 +Author: Dave Airlie +Date: Wed Aug 6 15:43:51 2008 +1000 + + pcigart: fix incorrect memset + no need for wbinvd + +commit ff45826ff23d103843791064eb81060966a145b8 +Author: Dave Airlie +Date: Tue Aug 5 11:22:24 2008 +1000 + + radeon: fix defines so blit works again + +commit d7d4177ae8d8a91d0b14b31ae736a69824e2d04a +Author: Dave Airlie +Date: Mon Aug 4 17:10:35 2008 +1000 + + drm: leave bo driver finish to the driver + +commit 8ee6b933237276b3971f91d57357d6b37da33dd6 +Author: Dave Airlie +Date: Mon Aug 4 14:21:22 2008 +1000 + + drm: fix unneeded debug + +commit 703e09f7de1025467b991bf9083dd5aca64a6148 +Author: Dave Airlie +Date: Mon Aug 4 14:20:47 2008 +1000 + + radeon: add setparam for userspace to init the memory manager. 
+ + if kms enabled memory manager will be enabled by default + +commit 66c0ef85801ce1ac9958d545e858f3ee188b3042 +Author: Dave Airlie +Date: Mon Aug 4 14:20:26 2008 +1000 + + drm: kill bo after driver lastclose + +commit 7412d483e6a388d0bd0aa2581c73e5612e6a8c63 +Author: Dave Airlie +Date: Mon Aug 4 11:32:45 2008 +1000 + + radeon: don't invalidate cache if CP isn't running + +commit cf6ad022e1c8f3a1c5b78164d08be2ad3245ecf0 +Author: Dave Airlie +Date: Sat Aug 2 08:06:26 2008 +1000 + + drm: fix release locking + +commit 65e2c8ca6e4f7a803e186bb80ebe9c09f862164a +Author: Dave Airlie +Date: Fri Aug 1 16:55:34 2008 +1000 + + drm: add radeon modesetting support + +commit 03b60f68076f34ece092bcafa36b470bc804a246 +Author: Dave Airlie +Date: Fri Aug 1 14:06:12 2008 +1000 + + drm: add TTM VM changes + +commit 4e4e1d96aa12ae2c0a88acc518f9d0e74f756a7e +Author: Dave Airlie +Date: Thu Jul 31 15:27:27 2008 +1000 + + drm: import TTM basic objects + +commit af0cea8a20b9b38595fa577a15c13572064330f0 +Author: Dave Airlie +Date: Fri Sep 5 11:26:55 2008 +1000 + + drm: export drm_i915_flip_t type to userspace to build Mesa +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c +index eb1bf00..046b89a 100644 +--- a/arch/x86/mm/pat.c ++++ b/arch/x86/mm/pat.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -29,6 +30,7 @@ + + #ifdef CONFIG_X86_PAT + int __read_mostly pat_enabled = 1; ++EXPORT_SYMBOL_GPL(pat_enabled); + + void __cpuinit pat_disable(char *reason) + { +diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile +index 30022c4..9aba961 100644 +--- a/drivers/gpu/drm/Makefile ++++ b/drivers/gpu/drm/Makefile +@@ -10,8 +10,9 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ + drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ + drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ + drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ +- drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o +- ++ drm_crtc.o drm_crtc_helper.o 
drm_modes.o drm_edid.o drm_uncached.o \ ++ drm_bo.o drm_bo_move.o drm_fence.o drm_ttm.o ++ + drm-$(CONFIG_COMPAT) += drm_ioc32.o + + obj-$(CONFIG_DRM) += drm.o +diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c +index c533d0c..adc57dd 100644 +--- a/drivers/gpu/drm/ati_pcigart.c ++++ b/drivers/gpu/drm/ati_pcigart.c +@@ -34,9 +34,55 @@ + #include "drmP.h" + + # define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ ++# define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1)) + +-static int drm_ati_alloc_pcigart_table(struct drm_device *dev, +- struct drm_ati_pcigart_info *gart_info) ++#define ATI_PCIE_WRITE 0x4 ++#define ATI_PCIE_READ 0x8 ++ ++static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t addr, volatile u32 *pci_gart) ++{ ++ u32 page_base; ++ ++ page_base = (u32)addr & ATI_PCIGART_PAGE_MASK; ++ switch(gart_info->gart_reg_if) { ++ case DRM_ATI_GART_IGP: ++ page_base |= (upper_32_bits(addr) & 0xff) << 4; ++ page_base |= 0xc; ++ break; ++ case DRM_ATI_GART_PCIE: ++ page_base >>= 8; ++ page_base |= (upper_32_bits(addr) & 0xff) << 24; ++ page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE; ++ break; ++ default: ++ case DRM_ATI_GART_PCI: ++ break; ++ } ++ *pci_gart = cpu_to_le32(page_base); ++} ++ ++static __inline__ dma_addr_t gart_get_page_from_table(struct drm_ati_pcigart_info *gart_info, volatile u32 *pci_gart) ++{ ++ dma_addr_t retval; ++ switch(gart_info->gart_reg_if) { ++ case DRM_ATI_GART_IGP: ++ retval = (*pci_gart & ATI_PCIGART_PAGE_MASK); ++ retval += (((*pci_gart & 0xf0) >> 4) << 16) << 16; ++ break; ++ case DRM_ATI_GART_PCIE: ++ retval = (*pci_gart & ~0xc); ++ retval <<= 8; ++ break; ++ case DRM_ATI_GART_PCI: ++ retval = *pci_gart; ++ break; ++ } ++ ++ return retval; ++} ++ ++int drm_ati_alloc_pcigart_table(struct drm_device *dev, ++ struct drm_ati_pcigart_info *gart_info) + { + gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, + PAGE_SIZE, +@@ -44,12 +90,25 @@ 
static int drm_ati_alloc_pcigart_table(struct drm_device *dev, + if (gart_info->table_handle == NULL) + return -ENOMEM; + ++#ifdef CONFIG_X86 ++ /* IGPs only exist on x86 in any case */ ++ if (gart_info->gart_reg_if == DRM_ATI_GART_IGP) ++ set_memory_uc((unsigned long)gart_info->table_handle->vaddr, gart_info->table_size >> PAGE_SHIFT); ++#endif ++ ++ memset(gart_info->table_handle->vaddr, 0, gart_info->table_size); + return 0; + } ++EXPORT_SYMBOL(drm_ati_alloc_pcigart_table); + + static void drm_ati_free_pcigart_table(struct drm_device *dev, + struct drm_ati_pcigart_info *gart_info) + { ++#ifdef CONFIG_X86 ++ /* IGPs only exist on x86 in any case */ ++ if (gart_info->gart_reg_if == DRM_ATI_GART_IGP) ++ set_memory_wb((unsigned long)gart_info->table_handle->vaddr, gart_info->table_size >> PAGE_SHIFT); ++#endif + drm_pci_free(dev, gart_info->table_handle); + gart_info->table_handle = NULL; + } +@@ -63,7 +122,6 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info + + /* we need to support large memory configurations */ + if (!entry) { +- DRM_ERROR("no scatter/gather memory!\n"); + return 0; + } + +@@ -98,17 +156,14 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga + struct drm_sg_mem *entry = dev->sg; + void *address = NULL; + unsigned long pages; +- u32 *pci_gart, page_base; ++ u32 *pci_gart; + dma_addr_t bus_address = 0; + int i, j, ret = 0; + int max_pages; ++ dma_addr_t entry_addr; + +- if (!entry) { +- DRM_ERROR("no scatter/gather memory!\n"); +- goto done; +- } + +- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { ++ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN && gart_info->table_handle == NULL) { + DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); + + ret = drm_ati_alloc_pcigart_table(dev, gart_info); +@@ -116,15 +171,19 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga + DRM_ERROR("cannot allocate PCI GART page!\n"); + goto done; + } ++ 
} + ++ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { + address = gart_info->table_handle->vaddr; + bus_address = gart_info->table_handle->busaddr; + } else { + address = gart_info->addr; + bus_address = gart_info->bus_addr; +- DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n", +- (unsigned long long)bus_address, +- (unsigned long)address); ++ } ++ ++ if (!entry) { ++ DRM_ERROR("no scatter/gather memory!\n"); ++ goto done; + } + + pci_gart = (u32 *) address; +@@ -133,8 +192,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga + pages = (entry->pages <= max_pages) + ? entry->pages : max_pages; + +- memset(pci_gart, 0, max_pages * sizeof(u32)); +- + for (i = 0; i < pages; i++) { + /* we need to support large memory configurations */ + entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i], +@@ -146,32 +203,18 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga + bus_address = 0; + goto done; + } +- page_base = (u32) entry->busaddr[i]; + ++ entry_addr = entry->busaddr[i]; + for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) { +- switch(gart_info->gart_reg_if) { +- case DRM_ATI_GART_IGP: +- *pci_gart = cpu_to_le32((page_base) | 0xc); +- break; +- case DRM_ATI_GART_PCIE: +- *pci_gart = cpu_to_le32((page_base >> 8) | 0xc); +- break; +- default: +- case DRM_ATI_GART_PCI: +- *pci_gart = cpu_to_le32(page_base); +- break; +- } ++ gart_insert_page_into_table(gart_info, entry_addr, pci_gart); + pci_gart++; +- page_base += ATI_PCIGART_PAGE_SIZE; ++ entry_addr += ATI_PCIGART_PAGE_SIZE; + } + } ++ + ret = 1; + +-#if defined(__i386__) || defined(__x86_64__) +- wbinvd(); +-#else + mb(); +-#endif + + done: + gart_info->addr = address; +@@ -179,3 +222,142 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga + return ret; + } + EXPORT_SYMBOL(drm_ati_pcigart_init); ++ ++static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) ++{ ++ 
return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); ++} ++ ++static int ati_pcigart_populate(struct drm_ttm_backend *backend, ++ unsigned long num_pages, ++ struct page **pages, ++ struct page *dummy_read_page) ++{ ++ struct ati_pcigart_ttm_backend *atipci_be = ++ container_of(backend, struct ati_pcigart_ttm_backend, backend); ++ ++ atipci_be->pages = pages; ++ atipci_be->num_pages = num_pages; ++ atipci_be->populated = 1; ++ return 0; ++} ++ ++static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend, ++ struct drm_bo_mem_reg *bo_mem) ++{ ++ struct ati_pcigart_ttm_backend *atipci_be = ++ container_of(backend, struct ati_pcigart_ttm_backend, backend); ++ off_t j; ++ int i; ++ struct drm_ati_pcigart_info *info = atipci_be->gart_info; ++ volatile u32 *pci_gart; ++ dma_addr_t offset = bo_mem->mm_node->start; ++ dma_addr_t page_base; ++ ++ pci_gart = info->addr; ++ ++ j = offset; ++ while (j < (offset + atipci_be->num_pages)) { ++ if (gart_get_page_from_table(info, pci_gart + j)) ++ return -EBUSY; ++ j++; ++ } ++ ++ for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) { ++ struct page *cur_page = atipci_be->pages[i]; ++ /* write value */ ++ page_base = page_to_phys(cur_page); ++ gart_insert_page_into_table(info, page_base, pci_gart + j); ++ } ++ ++ mb(); ++ atipci_be->gart_flush_fn(atipci_be->dev); ++ ++ atipci_be->bound = 1; ++ atipci_be->offset = offset; ++ /* need to traverse table and add entries */ ++ DRM_DEBUG("\n"); ++ return 0; ++} ++ ++static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend) ++{ ++ struct ati_pcigart_ttm_backend *atipci_be = ++ container_of(backend, struct ati_pcigart_ttm_backend, backend); ++ struct drm_ati_pcigart_info *info = atipci_be->gart_info; ++ unsigned long offset = atipci_be->offset; ++ int i; ++ off_t j; ++ volatile u32 *pci_gart = info->addr; ++ ++ if (atipci_be->bound != 1) ++ return -EINVAL; ++ ++ for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) { ++ *(pci_gart + j) = 0; ++ } ++ ++ 
mb(); ++ atipci_be->gart_flush_fn(atipci_be->dev); ++ atipci_be->bound = 0; ++ atipci_be->offset = 0; ++ return 0; ++} ++ ++static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend) ++{ ++ struct ati_pcigart_ttm_backend *atipci_be = ++ container_of(backend, struct ati_pcigart_ttm_backend, backend); ++ ++ DRM_DEBUG("\n"); ++ if (atipci_be->pages) { ++ backend->func->unbind(backend); ++ atipci_be->pages = NULL; ++ ++ } ++ atipci_be->num_pages = 0; ++} ++ ++static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend) ++{ ++ struct ati_pcigart_ttm_backend *atipci_be; ++ if (backend) { ++ DRM_DEBUG("\n"); ++ atipci_be = container_of(backend, struct ati_pcigart_ttm_backend, backend); ++ if (atipci_be) { ++ if (atipci_be->pages) { ++ backend->func->clear(backend); ++ } ++ drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM); ++ } ++ } ++} ++ ++static struct drm_ttm_backend_func ati_pcigart_ttm_backend = ++{ ++ .needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust, ++ .populate = ati_pcigart_populate, ++ .clear = ati_pcigart_clear_ttm, ++ .bind = ati_pcigart_bind_ttm, ++ .unbind = ati_pcigart_unbind_ttm, ++ .destroy = ati_pcigart_destroy_ttm, ++}; ++ ++struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev)) ++{ ++ struct ati_pcigart_ttm_backend *atipci_be; ++ ++ atipci_be = drm_ctl_calloc(1, sizeof (*atipci_be), DRM_MEM_TTM); ++ if (!atipci_be) ++ return NULL; ++ ++ atipci_be->populated = 0; ++ atipci_be->backend.func = &ati_pcigart_ttm_backend; ++// atipci_be->backend.mem_type = DRM_BO_MEM_TT; ++ atipci_be->gart_info = info; ++ atipci_be->gart_flush_fn = gart_flush_fn; ++ atipci_be->dev = dev; ++ ++ return &atipci_be->backend; ++} ++EXPORT_SYMBOL(ati_pcigart_init_ttm); +diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c +index 3d33b82..e048aa2 100644 +--- a/drivers/gpu/drm/drm_agpsupport.c ++++ 
b/drivers/gpu/drm/drm_agpsupport.c +@@ -496,6 +496,177 @@ drm_agp_bind_pages(struct drm_device *dev, + } + EXPORT_SYMBOL(drm_agp_bind_pages); + ++/* ++ * AGP ttm backend interface. ++ */ ++ ++#ifndef AGP_USER_TYPES ++#define AGP_USER_TYPES (1 << 16) ++#define AGP_USER_MEMORY (AGP_USER_TYPES) ++#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) ++#endif ++#define AGP_REQUIRED_MAJOR 0 ++#define AGP_REQUIRED_MINOR 102 ++ ++static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) ++{ ++ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); ++} ++ ++ ++static int drm_agp_populate(struct drm_ttm_backend *backend, ++ unsigned long num_pages, struct page **pages, ++ struct page *dummy_read_page) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ struct page **cur_page, **last_page = pages + num_pages; ++ DRM_AGP_MEM *mem; ++ int dummy_page_count = 0; ++ ++ if (drm_alloc_memctl(num_pages * sizeof(void *))) ++ return -1; ++ ++ DRM_DEBUG("drm_agp_populate_ttm\n"); ++ mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY); ++ if (!mem) { ++ drm_free_memctl(num_pages * sizeof(void *)); ++ return -1; ++ } ++ ++ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count); ++ mem->page_count = 0; ++ for (cur_page = pages; cur_page < last_page; ++cur_page) { ++ struct page *page = *cur_page; ++ if (!page) { ++ page = dummy_read_page; ++ ++dummy_page_count; ++ } ++ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page)); ++ } ++ if (dummy_page_count) ++ DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count); ++ agp_be->mem = mem; ++ return 0; ++} ++ ++static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, ++ struct drm_bo_mem_reg *bo_mem) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ DRM_AGP_MEM *mem = agp_be->mem; ++ int ret; ++ int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && 
!(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED); ++ ++ DRM_DEBUG("drm_agp_bind_ttm\n"); ++ mem->is_flushed = true; ++ mem->type = AGP_USER_MEMORY; ++ /* CACHED MAPPED implies not snooped memory */ ++ if (snooped) ++ mem->type = AGP_USER_CACHED_MEMORY; ++ ++ ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start); ++ if (ret) ++ DRM_ERROR("AGP Bind memory failed\n"); ++ ++ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ? ++ DRM_BE_FLAG_BOUND_CACHED : 0, ++ DRM_BE_FLAG_BOUND_CACHED); ++ return ret; ++} ++ ++static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ ++ DRM_DEBUG("drm_agp_unbind_ttm\n"); ++ if (agp_be->mem->is_bound) ++ return drm_agp_unbind_memory(agp_be->mem); ++ else ++ return 0; ++} ++ ++static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ DRM_AGP_MEM *mem = agp_be->mem; ++ ++ DRM_DEBUG("drm_agp_clear_ttm\n"); ++ if (mem) { ++ unsigned long num_pages = mem->page_count; ++ backend->func->unbind(backend); ++ agp_free_memory(mem); ++ drm_free_memctl(num_pages * sizeof(void *)); ++ } ++ agp_be->mem = NULL; ++} ++ ++static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) ++{ ++ struct drm_agp_ttm_backend *agp_be; ++ ++ if (backend) { ++ DRM_DEBUG("drm_agp_destroy_ttm\n"); ++ agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); ++ if (agp_be) { ++ if (agp_be->mem) ++ backend->func->clear(backend); ++ drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM); ++ } ++ } ++} ++ ++static struct drm_ttm_backend_func agp_ttm_backend = { ++ .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, ++ .populate = drm_agp_populate, ++ .clear = drm_agp_clear_ttm, ++ .bind = drm_agp_bind_ttm, ++ .unbind = drm_agp_unbind_ttm, ++ .destroy = drm_agp_destroy_ttm, ++}; ++ ++struct drm_ttm_backend 
*drm_agp_init_ttm(struct drm_device *dev) ++{ ++ ++ struct drm_agp_ttm_backend *agp_be; ++ struct agp_kern_info *info; ++ ++ if (!dev->agp) { ++ DRM_ERROR("AGP is not initialized.\n"); ++ return NULL; ++ } ++ info = &dev->agp->agp_info; ++ ++ if (info->version.major != AGP_REQUIRED_MAJOR || ++ info->version.minor < AGP_REQUIRED_MINOR) { ++ DRM_ERROR("Wrong agpgart version %d.%d\n" ++ "\tYou need at least version %d.%d.\n", ++ info->version.major, ++ info->version.minor, ++ AGP_REQUIRED_MAJOR, ++ AGP_REQUIRED_MINOR); ++ return NULL; ++ } ++ ++ ++ agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM); ++ if (!agp_be) ++ return NULL; ++ ++ agp_be->mem = NULL; ++ ++ agp_be->bridge = dev->agp->bridge; ++ agp_be->populated = false; ++ agp_be->backend.func = &agp_ttm_backend; ++ agp_be->backend.dev = dev; ++ ++ return &agp_be->backend; ++} ++EXPORT_SYMBOL(drm_agp_init_ttm); ++ + void drm_agp_chipset_flush(struct drm_device *dev) + { + agp_flush_chipset(dev->agp->bridge); +diff --git a/drivers/gpu/drm/drm_bo.c b/drivers/gpu/drm/drm_bo.c +new file mode 100644 +index 0000000..8a38f4a +--- /dev/null ++++ b/drivers/gpu/drm/drm_bo.c +@@ -0,0 +1,2162 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++/* ++ * Locking may look a bit complicated but isn't really: ++ * ++ * The buffer usage atomic_t needs to be protected by dev->struct_mutex ++ * when there is a chance that it can be zero before or after the operation. ++ * ++ * dev->struct_mutex also protects all lists and list heads, ++ * Hash tables and hash heads. ++ * ++ * bo->mutex protects the buffer object itself excluding the usage field. ++ * bo->mutex does also protect the buffer list heads, so to manipulate those, ++ * we need both the bo->mutex and the dev->struct_mutex. ++ * ++ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal ++ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex, ++ * the list traversal will, in general, need to be restarted. ++ * ++ */ ++ ++static void drm_bo_destroy_locked(struct drm_buffer_object *bo); ++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo); ++static void drm_bo_unmap_virtual(struct drm_buffer_object *bo); ++ ++static inline uint64_t drm_bo_type_flags(unsigned type) ++{ ++ return (1ULL << (24 + type)); ++} ++ ++/* ++ * bo locked. dev->struct_mutex locked. 
++ */ ++ ++void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo) ++{ ++ struct drm_mem_type_manager *man; ++ ++ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); ++ DRM_ASSERT_LOCKED(&bo->mutex); ++ ++ man = &bo->dev->bm.man[bo->pinned_mem_type]; ++ list_add_tail(&bo->pinned_lru, &man->pinned); ++} ++ ++void drm_bo_add_to_lru(struct drm_buffer_object *bo) ++{ ++ struct drm_mem_type_manager *man; ++ ++ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); ++ ++ if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) ++ || bo->mem.mem_type != bo->pinned_mem_type) { ++ man = &bo->dev->bm.man[bo->mem.mem_type]; ++ list_add_tail(&bo->lru, &man->lru); ++ } else { ++ INIT_LIST_HEAD(&bo->lru); ++ } ++} ++ ++static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci) ++{ ++#ifdef DRM_ODD_MM_COMPAT ++ int ret; ++ ++ if (!bo->map_list.map) ++ return 0; ++ ++ ret = drm_bo_lock_kmm(bo); ++ if (ret) ++ return ret; ++ drm_bo_unmap_virtual(bo); ++ if (old_is_pci) ++ drm_bo_finish_unmap(bo); ++#else ++ if (!bo->map_list.map) ++ return 0; ++ ++ drm_bo_unmap_virtual(bo); ++#endif ++ return 0; ++} ++ ++static void drm_bo_vm_post_move(struct drm_buffer_object *bo) ++{ ++#ifdef DRM_ODD_MM_COMPAT ++ int ret; ++ ++ if (!bo->map_list.map) ++ return; ++ ++ ret = drm_bo_remap_bound(bo); ++ if (ret) { ++ DRM_ERROR("Failed to remap a bound buffer object.\n" ++ "\tThis might cause a sigbus later.\n"); ++ } ++ drm_bo_unlock_kmm(bo); ++#endif ++} ++ ++/* ++ * Call bo->mutex locked. 
++ */ ++ ++int drm_bo_add_ttm(struct drm_buffer_object *bo) ++{ ++ struct drm_device *dev = bo->dev; ++ int ret = 0; ++ uint32_t page_flags = 0; ++ ++ DRM_ASSERT_LOCKED(&bo->mutex); ++ bo->ttm = NULL; ++ ++ if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE) ++ page_flags |= DRM_TTM_PAGE_WRITE; ++ ++ switch (bo->type) { ++ case drm_bo_type_device: ++ case drm_bo_type_kernel: ++ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, ++ page_flags, dev->bm.dummy_read_page); ++ if (!bo->ttm) ++ ret = -ENOMEM; ++ break; ++ case drm_bo_type_user: ++ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, ++ page_flags | DRM_TTM_PAGE_USER, ++ dev->bm.dummy_read_page); ++ if (!bo->ttm) ++ ret = -ENOMEM; ++ ++ ret = drm_ttm_set_user(bo->ttm, current, ++ bo->buffer_start, ++ bo->num_pages); ++ if (ret) ++ return ret; ++ ++ break; ++ default: ++ DRM_ERROR("Illegal buffer object type\n"); ++ ret = -EINVAL; ++ break; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_add_ttm); ++ ++static int drm_bo_handle_move_mem(struct drm_buffer_object *bo, ++ struct drm_bo_mem_reg *mem, ++ int evict, int no_wait) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); ++ int new_is_pci = drm_mem_reg_is_pci(dev, mem); ++ struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; ++ struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; ++ int ret = 0; ++ ++ if (old_is_pci || new_is_pci || ++ ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED)) ++ ret = drm_bo_vm_pre_move(bo, old_is_pci); ++ if (ret) ++ return ret; ++ ++ /* ++ * Create and bind a ttm if required. 
++ */ ++ ++ if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) { ++ ret = drm_bo_add_ttm(bo); ++ if (ret) ++ goto out_err; ++ ++ if (mem->mem_type != DRM_BO_MEM_LOCAL) { ++ ret = drm_ttm_bind(bo->ttm, mem); ++ if (ret) ++ goto out_err; ++ } ++ ++ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) { ++ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ ++ *old_mem = *mem; ++ mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, mem->flags, ++ DRM_BO_MASK_MEMTYPE); ++ goto moved; ++ } ++ ++ } ++ ++ if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && ++ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) ++ ret = drm_bo_move_ttm(bo, evict, no_wait, mem); ++ else if (dev->driver->bo_driver->move) ++ ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); ++ else ++ ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); ++ ++ if (ret) ++ goto out_err; ++ ++moved: ++ if (old_is_pci || new_is_pci) ++ drm_bo_vm_post_move(bo); ++ ++ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { ++ ret = ++ dev->driver->bo_driver->invalidate_caches(dev, ++ bo->mem.flags); ++ if (ret) ++ DRM_ERROR("Can not flush read caches\n"); ++ } ++ ++ DRM_FLAG_MASKED(bo->priv_flags, ++ (evict) ? _DRM_BO_FLAG_EVICTED : 0, ++ _DRM_BO_FLAG_EVICTED); ++ ++ if (bo->mem.mm_node) ++ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + ++ bm->man[bo->mem.mem_type].gpu_offset; ++ ++ ++ return 0; ++ ++out_err: ++ if (old_is_pci || new_is_pci) ++ drm_bo_vm_post_move(bo); ++ ++ new_man = &bm->man[bo->mem.mem_type]; ++ if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { ++ drm_ttm_unbind(bo->ttm); ++ drm_ttm_destroy(bo->ttm); ++ bo->ttm = NULL; ++ } ++ ++ return ret; ++} ++ ++/* ++ * Call bo->mutex locked. ++ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise. 
++ */ ++ ++static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced) ++{ ++ struct drm_fence_object *fence = bo->fence; ++ ++ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) ++ return -EBUSY; ++ ++ if (fence) { ++ if (drm_fence_object_signaled(fence, bo->fence_type)) { ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ return 0; ++ } ++ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE); ++ if (drm_fence_object_signaled(fence, bo->fence_type)) { ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ return 0; ++ } ++ return -EBUSY; ++ } ++ return 0; ++} ++ ++static int drm_bo_check_unfenced(struct drm_buffer_object *bo) ++{ ++ int ret; ++ ++ mutex_lock(&bo->mutex); ++ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); ++ mutex_unlock(&bo->mutex); ++ return ret; ++} ++ ++ ++/* ++ * Call bo->mutex locked. ++ * Wait until the buffer is idle. ++ */ ++ ++int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible, ++ int no_wait, int check_unfenced) ++{ ++ int ret; ++ ++ DRM_ASSERT_LOCKED(&bo->mutex); ++ while(unlikely(drm_bo_busy(bo, check_unfenced))) { ++ if (no_wait) ++ return -EBUSY; ++ ++ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) { ++ mutex_unlock(&bo->mutex); ++ wait_event(bo->event_queue, !drm_bo_check_unfenced(bo)); ++ mutex_lock(&bo->mutex); ++ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; ++ } ++ ++ if (bo->fence) { ++ struct drm_fence_object *fence; ++ uint32_t fence_type = bo->fence_type; ++ ++ drm_fence_reference_unlocked(&fence, bo->fence); ++ mutex_unlock(&bo->mutex); ++ ++ ret = drm_fence_object_wait(fence, lazy, !interruptible, ++ fence_type); ++ ++ drm_fence_usage_deref_unlocked(&fence); ++ mutex_lock(&bo->mutex); ++ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; ++ if (ret) ++ return ret; ++ } ++ ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_wait); ++ ++static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = 
&dev->bm; ++ ++ if (bo->fence) { ++ if (bm->nice_mode) { ++ unsigned long _end = jiffies + 3 * DRM_HZ; ++ int ret; ++ do { ++ ret = drm_bo_wait(bo, 0, 0, 0, 0); ++ if (ret && allow_errors) ++ return ret; ++ ++ } while (ret && !time_after_eq(jiffies, _end)); ++ ++ if (bo->fence) { ++ bm->nice_mode = 0; ++ DRM_ERROR("Detected GPU lockup or " ++ "fence driver was taken down. " ++ "Evicting buffer.\n"); ++ } ++ } ++ if (bo->fence) ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ } ++ return 0; ++} ++ ++/* ++ * Call dev->struct_mutex locked. ++ * Attempts to remove all private references to a buffer by expiring its ++ * fence object and removing from lru lists and memory managers. ++ */ ++ ++static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ atomic_inc(&bo->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&bo->mutex); ++ ++ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); ++ ++ if (bo->fence && drm_fence_object_signaled(bo->fence, ++ bo->fence_type)) ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ ++ if (bo->fence && remove_all) ++ (void)drm_bo_expire_fence(bo, 0); ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (!atomic_dec_and_test(&bo->usage)) ++ goto out; ++ ++ if (!bo->fence) { ++ list_del_init(&bo->lru); ++ if (bo->mem.mm_node) { ++ drm_mm_put_block(bo->mem.mm_node); ++ if (bo->pinned_node == bo->mem.mm_node) ++ bo->pinned_node = NULL; ++ bo->mem.mm_node = NULL; ++ } ++ list_del_init(&bo->pinned_lru); ++ if (bo->pinned_node) { ++ drm_mm_put_block(bo->pinned_node); ++ bo->pinned_node = NULL; ++ } ++ list_del_init(&bo->ddestroy); ++ mutex_unlock(&bo->mutex); ++ drm_bo_destroy_locked(bo); ++ return; ++ } ++ ++ if (list_empty(&bo->ddestroy)) { ++ drm_fence_object_flush(bo->fence, bo->fence_type); ++ list_add_tail(&bo->ddestroy, &bm->ddestroy); ++ schedule_delayed_work(&bm->wq, ++ 
((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); ++ } ++ ++out: ++ mutex_unlock(&bo->mutex); ++ return; ++} ++ ++/* ++ * Verify that refcount is 0 and that there are no internal references ++ * to the buffer object. Then destroy it. ++ */ ++ ++static void drm_bo_destroy_locked(struct drm_buffer_object *bo) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ DRM_DEBUG("freeing %p\n", bo); ++ if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && ++ list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && ++ list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { ++ if (bo->fence != NULL) { ++ DRM_ERROR("Fence was non-zero.\n"); ++ drm_bo_cleanup_refs(bo, 0); ++ return; ++ } ++ ++#ifdef DRM_ODD_MM_COMPAT ++ BUG_ON(!list_empty(&bo->vma_list)); ++ BUG_ON(!list_empty(&bo->p_mm_list)); ++#endif ++ ++ if (bo->ttm) { ++ drm_ttm_unbind(bo->ttm); ++ drm_ttm_destroy(bo->ttm); ++ bo->ttm = NULL; ++ } ++ ++ atomic_dec(&bm->count); ++ ++ drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); ++ ++ return; ++ } ++ ++ /* ++ * Some stuff is still trying to reference the buffer object. ++ * Get rid of those references. ++ */ ++ ++ drm_bo_cleanup_refs(bo, 0); ++ ++ return; ++} ++ ++/* ++ * Call dev->struct_mutex locked. 
++ */ ++ ++static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ struct drm_buffer_object *entry, *nentry; ++ struct list_head *list, *next; ++ ++ list_for_each_safe(list, next, &bm->ddestroy) { ++ entry = list_entry(list, struct drm_buffer_object, ddestroy); ++ ++ nentry = NULL; ++ DRM_DEBUG("bo is %p, %d\n", entry, entry->num_pages); ++ if (next != &bm->ddestroy) { ++ nentry = list_entry(next, struct drm_buffer_object, ++ ddestroy); ++ atomic_inc(&nentry->usage); ++ } ++ ++ drm_bo_cleanup_refs(entry, remove_all); ++ ++ if (nentry) ++ atomic_dec(&nentry->usage); ++ } ++} ++ ++static void drm_bo_delayed_workqueue(struct work_struct *work) ++{ ++ struct drm_buffer_manager *bm = ++ container_of(work, struct drm_buffer_manager, wq.work); ++ struct drm_device *dev = container_of(bm, struct drm_device, bm); ++ ++ DRM_DEBUG("Delayed delete Worker\n"); ++ ++ mutex_lock(&dev->struct_mutex); ++ if (!bm->initialized) { ++ mutex_unlock(&dev->struct_mutex); ++ return; ++ } ++ drm_bo_delayed_delete(dev, 0); ++ if (bm->initialized && !list_empty(&bm->ddestroy)) { ++ schedule_delayed_work(&bm->wq, ++ ((DRM_HZ / 100) < 1) ? 
1 : DRM_HZ / 100); ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++void drm_bo_usage_deref_locked(struct drm_buffer_object **bo) ++{ ++ struct drm_buffer_object *tmp_bo = *bo; ++ bo = NULL; ++ ++ DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex); ++ ++ if (atomic_dec_and_test(&tmp_bo->usage)) ++ drm_bo_destroy_locked(tmp_bo); ++} ++EXPORT_SYMBOL(drm_bo_usage_deref_locked); ++ ++void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo) ++{ ++ struct drm_buffer_object *tmp_bo = *bo; ++ struct drm_device *dev = tmp_bo->dev; ++ ++ *bo = NULL; ++ if (atomic_dec_and_test(&tmp_bo->usage)) { ++ mutex_lock(&dev->struct_mutex); ++ if (atomic_read(&tmp_bo->usage) == 0) ++ drm_bo_destroy_locked(tmp_bo); ++ mutex_unlock(&dev->struct_mutex); ++ } ++} ++EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); ++ ++void drm_putback_buffer_objects(struct drm_device *dev) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct list_head *list = &bm->unfenced; ++ struct drm_buffer_object *entry, *next; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry_safe(entry, next, list, lru) { ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ ++ mutex_lock(&entry->mutex); ++ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); ++ mutex_lock(&dev->struct_mutex); ++ ++ list_del_init(&entry->lru); ++ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); ++ wake_up_all(&entry->event_queue); ++ ++ /* ++ * FIXME: Might want to put back on head of list ++ * instead of tail here. ++ */ ++ ++ drm_bo_add_to_lru(entry); ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_locked(&entry); ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++EXPORT_SYMBOL(drm_putback_buffer_objects); ++ ++/* ++ * Note. The caller has to register (if applicable) ++ * and deregister fence object usage. 
++ */ ++ ++int drm_fence_buffer_objects(struct drm_device *dev, ++ struct list_head *list, ++ uint32_t fence_flags, ++ struct drm_fence_object *fence, ++ struct drm_fence_object **used_fence) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_buffer_object *entry; ++ uint32_t fence_type = 0; ++ uint32_t fence_class = ~0; ++ int count = 0; ++ int ret = 0; ++ struct list_head *l; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (!list) ++ list = &bm->unfenced; ++ ++ if (fence) ++ fence_class = fence->fence_class; ++ ++ list_for_each_entry(entry, list, lru) { ++ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); ++ fence_type |= entry->new_fence_type; ++ if (fence_class == ~0) ++ fence_class = entry->new_fence_class; ++ else if (entry->new_fence_class != fence_class) { ++ DRM_ERROR("Unmatching fence classes on unfenced list: " ++ "%d and %d.\n", ++ fence_class, ++ entry->new_fence_class); ++ ret = -EINVAL; ++ goto out; ++ } ++ count++; ++ } ++ ++ if (!count) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (fence) { ++ if ((fence_type & fence->type) != fence_type || ++ (fence->fence_class != fence_class)) { ++ DRM_ERROR("Given fence doesn't match buffers " ++ "on unfenced list.\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ } else { ++ mutex_unlock(&dev->struct_mutex); ++ ret = drm_fence_object_create(dev, fence_class, fence_type, ++ fence_flags | DRM_FENCE_FLAG_EMIT, ++ &fence); ++ mutex_lock(&dev->struct_mutex); ++ if (ret) ++ goto out; ++ } ++ ++ count = 0; ++ l = list->next; ++ while (l != list) { ++ prefetch(l->next); ++ entry = list_entry(l, struct drm_buffer_object, lru); ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&entry->mutex); ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(l); ++ if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { ++ count++; ++ if (entry->fence) ++ drm_fence_usage_deref_locked(&entry->fence); ++ entry->fence = drm_fence_reference_locked(fence); ++ entry->fence_class = entry->new_fence_class; 
++ entry->fence_type = entry->new_fence_type; ++ DRM_FLAG_MASKED(entry->priv_flags, 0, ++ _DRM_BO_FLAG_UNFENCED); ++ wake_up_all(&entry->event_queue); ++ drm_bo_add_to_lru(entry); ++ } ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_locked(&entry); ++ l = list->next; ++ } ++ DRM_DEBUG("Fenced %d buffers\n", count); ++out: ++ mutex_unlock(&dev->struct_mutex); ++ *used_fence = fence; ++ return ret; ++} ++EXPORT_SYMBOL(drm_fence_buffer_objects); ++ ++/* if we discard a buffer with no backing store - ++ * we can just set it back to a clean state ++ */ ++static int drm_bo_reset_local(struct drm_buffer_object *bo) ++{ ++ int ret = 0; ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); ++ ++ ret = drm_bo_vm_pre_move(bo, old_is_pci); ++ if (ret) ++ return ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->lru); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (old_mem->mm_node) { ++ mutex_lock(&dev->struct_mutex); ++ drm_mm_put_block(old_mem->mm_node); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ old_mem->mm_node = NULL; ++ ++ bo->mem.mem_type = DRM_BO_MEM_LOCAL; ++ ++ bo->mem.flags &= ~DRM_BO_MASK_MEMTYPE; ++ bo->mem.flags |= (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_DISCARDABLE | ++ DRM_BO_FLAG_CLEAN); ++ ++ bo->mem.proposed_flags = bo->mem.flags; ++ return 0; ++} ++/* ++ * bo->mutex locked ++ */ ++ ++static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type, ++ int no_wait) ++{ ++ int ret = 0; ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg evict_mem; ++ ++ /* ++ * Someone might have modified the buffer before we took the ++ * buffer mutex. 
++ */ ++ ++ do { ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ if (unlikely(bo->mem.flags & ++ (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) ++ goto out_unlock; ++ if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) ++ goto out_unlock; ++ if (unlikely(bo->mem.mem_type != mem_type)) ++ goto out_unlock; ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 0); ++ if (ret) ++ goto out_unlock; ++ ++ } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++ ++ /* if in VRAM and discardable - discard it */ ++ if (bo->mem.mem_type == DRM_BO_MEM_VRAM && bo->mem.flags & DRM_BO_FLAG_DISCARDABLE) { ++ drm_bo_reset_local(bo); ++ goto out_unlock; ++ } ++ ++ evict_mem = bo->mem; ++ evict_mem.mm_node = NULL; ++ ++ evict_mem = bo->mem; ++ evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo); ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->lru); ++ mutex_unlock(&dev->struct_mutex); ++ ++ ret = drm_bo_mem_space(bo, &evict_mem, no_wait); ++ ++ if (ret) { ++ if (ret != -EAGAIN) ++ DRM_ERROR("Failed to find memory space for " ++ "buffer 0x%p eviction.\n", bo); ++ goto out; ++ } ++ ++ ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); ++ ++ if (ret) { ++ if (ret != -EAGAIN) ++ DRM_ERROR("Buffer eviction failed\n"); ++ goto out; ++ } ++ ++ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, ++ _DRM_BO_FLAG_EVICTED); ++ ++out: ++ mutex_lock(&dev->struct_mutex); ++ if (evict_mem.mm_node) { ++ if (evict_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(evict_mem.mm_node); ++ evict_mem.mm_node = NULL; ++ } ++ drm_bo_add_to_lru(bo); ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++ mutex_unlock(&dev->struct_mutex); ++out_unlock: ++ ++ return ret; ++} ++ ++/** ++ * Repeatedly evict memory from the LRU for @mem_type until we create enough ++ * space, or we've evicted everything and there isn't enough space. 
++ */ ++static int drm_bo_mem_force_space(struct drm_device *dev, ++ struct drm_bo_mem_reg *mem, ++ uint32_t mem_type, int no_wait) ++{ ++ struct drm_mm_node *node; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_buffer_object *entry; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ struct list_head *lru; ++ unsigned long num_pages = mem->num_pages; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ do { ++ node = drm_mm_search_free(&man->manager, num_pages, ++ mem->page_alignment, 1); ++ if (node) ++ break; ++ ++ lru = &man->lru; ++ if (lru->next == lru) ++ break; ++ ++ entry = list_entry(lru->next, struct drm_buffer_object, lru); ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&entry->mutex); ++ ret = drm_bo_evict(entry, mem_type, no_wait); ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_unlocked(&entry); ++ if (ret) ++ return ret; ++ mutex_lock(&dev->struct_mutex); ++ } while (1); ++ ++ if (!node) { ++ mutex_unlock(&dev->struct_mutex); ++ return -ENOMEM; ++ } ++ ++ node = drm_mm_get_block(node, num_pages, mem->page_alignment); ++ if (unlikely(!node)) { ++ mutex_unlock(&dev->struct_mutex); ++ return -ENOMEM; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ mem->mm_node = node; ++ mem->mem_type = mem_type; ++ return 0; ++} ++ ++static int drm_bo_mt_compatible(struct drm_mem_type_manager *man, ++ int disallow_fixed, ++ uint32_t mem_type, ++ uint64_t mask, uint32_t *res_mask) ++{ ++ uint64_t cur_flags = drm_bo_type_flags(mem_type); ++ uint64_t flag_diff; ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed) ++ return 0; ++ if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) ++ cur_flags |= DRM_BO_FLAG_CACHED; ++ if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE) ++ cur_flags |= DRM_BO_FLAG_MAPPABLE; ++ if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) ++ DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); ++ ++ if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) ++ return 0; ++ ++ if (mem_type == DRM_BO_MEM_LOCAL) 
{ ++ *res_mask = cur_flags; ++ return 1; ++ } ++ ++ flag_diff = (mask ^ cur_flags); ++ if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED) ++ cur_flags |= DRM_BO_FLAG_CACHED_MAPPED; ++ ++ if ((flag_diff & DRM_BO_FLAG_CACHED) && ++ (!(mask & DRM_BO_FLAG_CACHED) || ++ (mask & DRM_BO_FLAG_FORCE_CACHING))) ++ return 0; ++ ++ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && ++ ((mask & DRM_BO_FLAG_MAPPABLE) || ++ (mask & DRM_BO_FLAG_FORCE_MAPPABLE))) ++ return 0; ++ ++ *res_mask = cur_flags; ++ return 1; ++} ++ ++/** ++ * Creates space for memory region @mem according to its type. ++ * ++ * This function first searches for free space in compatible memory types in ++ * the priority order defined by the driver. If free space isn't found, then ++ * drm_bo_mem_force_space is attempted in priority order to evict and find ++ * space. ++ */ ++int drm_bo_mem_space(struct drm_buffer_object *bo, ++ struct drm_bo_mem_reg *mem, int no_wait) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man; ++ ++ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; ++ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; ++ uint32_t i; ++ uint32_t mem_type = DRM_BO_MEM_LOCAL; ++ uint32_t cur_flags; ++ int type_found = 0; ++ int type_ok = 0; ++ int has_eagain = 0; ++ struct drm_mm_node *node = NULL; ++ int ret; ++ ++ mem->mm_node = NULL; ++ for (i = 0; i < num_prios; ++i) { ++ mem_type = prios[i]; ++ man = &bm->man[mem_type]; ++ ++ type_ok = drm_bo_mt_compatible(man, ++ bo->type == drm_bo_type_user, ++ mem_type, mem->proposed_flags, ++ &cur_flags); ++ ++ if (!type_ok) ++ continue; ++ ++ if (mem_type == DRM_BO_MEM_LOCAL) ++ break; ++ ++ if ((mem_type == bo->pinned_mem_type) && ++ (bo->pinned_node != NULL)) { ++ node = bo->pinned_node; ++ break; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ if (man->has_type && man->use_type) { ++ type_found = 1; ++ node = drm_mm_search_free(&man->manager, mem->num_pages, ++ 
mem->page_alignment, 1); ++ if (node) ++ node = drm_mm_get_block(node, mem->num_pages, ++ mem->page_alignment); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ if (node) ++ break; ++ } ++ ++ if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { ++ mem->mm_node = node; ++ mem->mem_type = mem_type; ++ mem->flags = cur_flags; ++ return 0; ++ } ++ ++ if (!type_found) ++ return -EINVAL; ++ ++ num_prios = dev->driver->bo_driver->num_mem_busy_prio; ++ prios = dev->driver->bo_driver->mem_busy_prio; ++ ++ for (i = 0; i < num_prios; ++i) { ++ mem_type = prios[i]; ++ man = &bm->man[mem_type]; ++ ++ if (!man->has_type) ++ continue; ++ ++ if (!drm_bo_mt_compatible(man, ++ bo->type == drm_bo_type_user, ++ mem_type, ++ mem->proposed_flags, ++ &cur_flags)) ++ continue; ++ ++ ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); ++ ++ if (ret == 0 && mem->mm_node) { ++ mem->flags = cur_flags; ++ return 0; ++ } ++ ++ if (ret == -EAGAIN) ++ has_eagain = 1; ++ } ++ ++ ret = (has_eagain) ? -EAGAIN : -ENOMEM; ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_mem_space); ++ ++/* ++ * drm_bo_propose_flags: ++ * ++ * @bo: the buffer object getting new flags ++ * ++ * @new_flags: the new set of proposed flag bits ++ * ++ * @new_mask: the mask of bits changed in new_flags ++ * ++ * Modify the proposed_flag bits in @bo ++ */ ++static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo, ++ uint64_t new_flags, uint64_t new_mask) ++{ ++ uint32_t new_access; ++ ++ /* Copy unchanging bits from existing proposed_flags */ ++ DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask); ++ ++ if (bo->type == drm_bo_type_user && ++ ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) != ++ (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) { ++ DRM_ERROR("User buffers require cache-coherent memory.\n"); ++ return -EINVAL; ++ } ++ ++ if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { ++ DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only 
available to priviliged processes.\n"); ++ return -EPERM; ++ } ++ ++ if (likely(new_mask & DRM_BO_MASK_MEM) && ++ (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) && ++ !DRM_SUSER(DRM_CURPROC)) { ++ if (likely(bo->mem.flags & new_flags & new_mask & ++ DRM_BO_MASK_MEM)) ++ new_flags = (new_flags & ~DRM_BO_MASK_MEM) | ++ (bo->mem.flags & DRM_BO_MASK_MEM); ++ else { ++ DRM_ERROR("Incompatible memory type specification " ++ "for NO_EVICT buffer.\n"); ++ return -EPERM; ++ } ++ } ++ ++ if ((new_flags & DRM_BO_FLAG_NO_MOVE)) { ++ DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); ++ return -EPERM; ++ } ++ ++ new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | ++ DRM_BO_FLAG_READ); ++ ++ if (new_access == 0) { ++ DRM_ERROR("Invalid buffer object rwx properties\n"); ++ return -EINVAL; ++ } ++ ++ bo->mem.proposed_flags = new_flags; ++ return 0; ++} ++ ++/* ++ * Call bo->mutex locked. ++ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise. ++ * Doesn't do any fence flushing as opposed to the drm_bo_busy function. ++ */ ++ ++int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced) ++{ ++ struct drm_fence_object *fence = bo->fence; ++ ++ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) ++ return -EBUSY; ++ ++ if (fence) { ++ if (drm_fence_object_signaled(fence, bo->fence_type)) { ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ return 0; ++ } ++ return -EBUSY; ++ } ++ return 0; ++} ++ ++int drm_bo_evict_cached(struct drm_buffer_object *bo) ++{ ++ int ret = 0; ++ ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); ++ if (bo->mem.mm_node) ++ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1); ++ return ret; ++} ++ ++EXPORT_SYMBOL(drm_bo_evict_cached); ++/* ++ * Wait until a buffer is unmapped. 
++ */ ++ ++static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait) ++{ ++ int ret = 0; ++ ++ if (likely(atomic_read(&bo->mapped)) == 0) ++ return 0; ++ ++ if (unlikely(no_wait)) ++ return -EBUSY; ++ ++ do { ++ mutex_unlock(&bo->mutex); ++ ret = wait_event_interruptible(bo->event_queue, ++ atomic_read(&bo->mapped) == 0); ++ mutex_lock(&bo->mutex); ++ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; ++ ++ if (ret == -ERESTARTSYS) ++ ret = -EAGAIN; ++ } while((ret == 0) && atomic_read(&bo->mapped) > 0); ++ ++ return ret; ++} ++ ++/* ++ * bo->mutex locked. ++ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags. ++ */ ++ ++int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags, ++ int no_wait, int move_unfenced) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = 0; ++ struct drm_bo_mem_reg mem; ++ ++ BUG_ON(bo->fence != NULL); ++ ++ mem.num_pages = bo->num_pages; ++ mem.size = mem.num_pages << PAGE_SHIFT; ++ mem.proposed_flags = new_mem_flags; ++ mem.page_alignment = bo->mem.page_alignment; ++ ++ mutex_lock(&bm->evict_mutex); ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->lru); ++ mutex_unlock(&dev->struct_mutex); ++ ++ /* ++ * Determine where to move the buffer. 
++ */ ++ ret = drm_bo_mem_space(bo, &mem, no_wait); ++ if (ret) ++ goto out_unlock; ++ ++ ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); ++ ++out_unlock: ++ mutex_lock(&dev->struct_mutex); ++ if (ret || !move_unfenced) { ++ if (mem.mm_node) { ++ if (mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(mem.mm_node); ++ mem.mm_node = NULL; ++ } ++ drm_bo_add_to_lru(bo); ++ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { ++ wake_up_all(&bo->event_queue); ++ DRM_FLAG_MASKED(bo->priv_flags, 0, ++ _DRM_BO_FLAG_UNFENCED); ++ } ++ } else { ++ list_add_tail(&bo->lru, &bm->unfenced); ++ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, ++ _DRM_BO_FLAG_UNFENCED); ++ } ++ /* clear the clean flags */ ++ bo->mem.flags &= ~DRM_BO_FLAG_CLEAN; ++ bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN; ++ ++ mutex_unlock(&dev->struct_mutex); ++ mutex_unlock(&bm->evict_mutex); ++ return ret; ++} ++ ++static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem) ++{ ++ uint32_t flag_diff = (mem->proposed_flags ^ mem->flags); ++ ++ if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0) ++ return 0; ++ if ((flag_diff & DRM_BO_FLAG_CACHED) && ++ (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/ ++ (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING))) ++ return 0; ++ ++ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && ++ ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) || ++ (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE))) ++ return 0; ++ return 1; ++} ++ ++/** ++ * drm_buffer_object_validate: ++ * ++ * @bo: the buffer object to modify ++ * ++ * @fence_class: the new fence class covering this buffer ++ * ++ * @move_unfenced: a boolean indicating whether switching the ++ * memory space of this buffer should cause the buffer to ++ * be placed on the unfenced list. ++ * ++ * @no_wait: whether this function should return -EBUSY instead ++ * of waiting. ++ * ++ * Change buffer access parameters. 
This can involve moving ++ * the buffer to the correct memory type, pinning the buffer ++ * or changing the class/type of fence covering this buffer ++ * ++ * Must be called with bo locked. ++ */ ++ ++static int drm_buffer_object_validate(struct drm_buffer_object *bo, ++ uint32_t fence_class, ++ int move_unfenced, int no_wait, ++ int move_buffer) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret; ++ ++ if (move_buffer) { ++ ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait, ++ move_unfenced); ++ if (ret) { ++ if (ret != -EAGAIN) ++ DRM_ERROR("Failed moving buffer. %p %d %llx %llx\n", bo, bo->num_pages, bo->mem.proposed_flags, bo->mem.flags ); ++ if (ret == -ENOMEM) ++ DRM_ERROR("Out of aperture space or " ++ "DRM memory quota.\n"); ++ return ret; ++ } ++ } ++ ++ /* ++ * Pinned buffers. ++ */ ++ ++ if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { ++ bo->pinned_mem_type = bo->mem.mem_type; ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->pinned_lru); ++ drm_bo_add_to_pinned_lru(bo); ++ ++ if (bo->pinned_node != bo->mem.mm_node) { ++ if (bo->pinned_node != NULL) ++ drm_mm_put_block(bo->pinned_node); ++ bo->pinned_node = bo->mem.mm_node; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ } else if (bo->pinned_node != NULL) { ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (bo->pinned_node != bo->mem.mm_node) ++ drm_mm_put_block(bo->pinned_node); ++ ++ list_del_init(&bo->pinned_lru); ++ bo->pinned_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ ++ } ++ ++ /* ++ * We might need to add a TTM. 
++ */ ++ ++ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { ++ ret = drm_bo_add_ttm(bo); ++ if (ret) ++ return ret; ++ } ++ /* ++ * Validation has succeeded, move the access and other ++ * non-mapping-related flag bits from the proposed flags to ++ * the active flags ++ */ ++ ++ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE); ++ ++ /* ++ * Finally, adjust lru to be sure. ++ */ ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del(&bo->lru); ++ if (move_unfenced) { ++ list_add_tail(&bo->lru, &bm->unfenced); ++ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, ++ _DRM_BO_FLAG_UNFENCED); ++ } else { ++ drm_bo_add_to_lru(bo); ++ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { ++ wake_up_all(&bo->event_queue); ++ DRM_FLAG_MASKED(bo->priv_flags, 0, ++ _DRM_BO_FLAG_UNFENCED); ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/* ++ * This function is called with bo->mutex locked, but may release it ++ * temporarily to wait for events. ++ */ ++ ++static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo, ++ uint64_t flags, ++ uint64_t mask, ++ uint32_t hint, ++ uint32_t fence_class, ++ int no_wait, ++ int *move_buffer) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ uint32_t ftype; ++ ++ int ret; ++ ++ ++ ret = drm_bo_modify_proposed_flags (bo, flags, mask); ++ if (ret) ++ return ret; ++ ++ DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n", ++ (unsigned long long) bo->mem.proposed_flags, ++ (unsigned long long) bo->mem.flags); ++ ++ ret = drm_bo_wait_unmapped(bo, no_wait); ++ if (ret) ++ return ret; ++ ++ ret = driver->fence_type(bo, &fence_class, &ftype); ++ ++ if (ret) { ++ DRM_ERROR("Driver did not support given buffer permissions.\n"); ++ return ret; ++ } ++ ++ /* ++ * We're switching command submission mechanism, ++ * or cannot simply rely on the hardware serializing for us. 
++ * Insert a driver-dependant barrier or wait for buffer idle. ++ */ ++ ++ if ((fence_class != bo->fence_class) || ++ ((ftype ^ bo->fence_type) & bo->fence_type)) { ++ ++ ret = -EINVAL; ++ if (driver->command_stream_barrier) { ++ ret = driver->command_stream_barrier(bo, ++ fence_class, ++ ftype, ++ no_wait); ++ } ++ if (ret && ret != -EAGAIN) ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 1); ++ ++ if (ret) ++ return ret; ++ } ++ ++ bo->new_fence_class = fence_class; ++ bo->new_fence_type = ftype; ++ ++ /* ++ * Check whether we need to move buffer. ++ */ ++ ++ *move_buffer = 0; ++ if (!drm_bo_mem_compat(&bo->mem)) { ++ *move_buffer = 1; ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 1); ++ } ++ ++ return ret; ++} ++ ++/** ++ * drm_bo_do_validate: ++ * ++ * @bo: the buffer object ++ * ++ * @flags: access rights, mapping parameters and cacheability. See ++ * the DRM_BO_FLAG_* values in drm.h ++ * ++ * @mask: Which flag values to change; this allows callers to modify ++ * things without knowing the current state of other flags. ++ * ++ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_* ++ * values in drm.h. ++ * ++ * @fence_class: a driver-specific way of doing fences. Presumably, ++ * this would be used if the driver had more than one submission and ++ * fencing mechanism. At this point, there isn't any use of this ++ * from the user mode code. ++ * ++ * @rep: To be stuffed with the reply from validation ++ * ++ * 'validate' a buffer object. This changes where the buffer is ++ * located, along with changing access modes. 
++ */ ++ ++int drm_bo_do_validate(struct drm_buffer_object *bo, ++ uint64_t flags, uint64_t mask, uint32_t hint, ++ uint32_t fence_class) ++{ ++ int ret; ++ int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0; ++ int move_buffer; ++ ++ mutex_lock(&bo->mutex); ++ ++ do { ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ ret = drm_bo_prepare_for_validate(bo, flags, mask, hint, ++ fence_class, no_wait, ++ &move_buffer); ++ if (ret) ++ goto out; ++ ++ } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED)); ++ ++ ret = drm_buffer_object_validate(bo, ++ fence_class, ++ !(hint & DRM_BO_HINT_DONT_FENCE), ++ no_wait, ++ move_buffer); ++ ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++out: ++ mutex_unlock(&bo->mutex); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_do_validate); ++ ++int drm_buffer_object_create(struct drm_device *dev, ++ unsigned long size, ++ enum drm_bo_type type, ++ uint64_t flags, ++ uint32_t hint, ++ uint32_t page_alignment, ++ unsigned long buffer_start, ++ struct drm_buffer_object **buf_obj) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_buffer_object *bo; ++ int ret = 0; ++ unsigned long num_pages; ++ ++ size += buffer_start & ~PAGE_MASK; ++ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ if (num_pages == 0) { ++ DRM_ERROR("Illegal buffer object size %ld.\n", size); ++ return -EINVAL; ++ } ++ ++ bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); ++ ++ if (!bo) ++ return -ENOMEM; ++ ++ mutex_init(&bo->mutex); ++ mutex_lock(&bo->mutex); ++ ++ atomic_set(&bo->usage, 1); ++ atomic_set(&bo->mapped, 0); ++ DRM_INIT_WAITQUEUE(&bo->event_queue); ++ INIT_LIST_HEAD(&bo->lru); ++ INIT_LIST_HEAD(&bo->pinned_lru); ++ INIT_LIST_HEAD(&bo->ddestroy); ++#ifdef DRM_ODD_MM_COMPAT ++ INIT_LIST_HEAD(&bo->p_mm_list); ++ INIT_LIST_HEAD(&bo->vma_list); ++#endif ++ bo->dev = dev; ++ bo->type = type; ++ bo->num_pages = num_pages; ++ bo->mem.mem_type = DRM_BO_MEM_LOCAL; ++ bo->mem.num_pages = bo->num_pages; ++ bo->mem.mm_node = NULL; ++ 
bo->mem.page_alignment = page_alignment; ++ bo->buffer_start = buffer_start & PAGE_MASK; ++ bo->priv_flags = 0; ++ bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | ++ DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_CLEAN); ++ bo->mem.proposed_flags = 0; ++ atomic_inc(&bm->count); ++ ++ if (dev->bm.allocator_type == _DRM_BM_ALLOCATOR_CACHED) ++ bo->mem.flags |= DRM_BO_FLAG_CACHED; ++ /* ++ * Use drm_bo_modify_proposed_flags to error-check the proposed flags ++ */ ++ flags |= DRM_BO_FLAG_CLEAN; ++ ++ ret = drm_bo_modify_proposed_flags (bo, flags, flags); ++ if (ret) ++ goto out_err; ++ ++ /* ++ * For drm_bo_type_device buffers, allocate ++ * address space from the device so that applications ++ * can mmap the buffer from there ++ */ ++ if (bo->type == drm_bo_type_device) { ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_bo_setup_vm_locked(bo); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) ++ goto out_err; ++ } ++ ++ mutex_unlock(&bo->mutex); ++ ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE, ++ 0); ++ if (ret) ++ goto out_err_unlocked; ++ ++ *buf_obj = bo; ++ return 0; ++ ++out_err: ++ mutex_unlock(&bo->mutex); ++out_err_unlocked: ++ drm_bo_usage_deref_unlocked(&bo); ++ return ret; ++} ++EXPORT_SYMBOL(drm_buffer_object_create); ++ ++static int drm_bo_leave_list(struct drm_buffer_object *bo, ++ uint32_t mem_type, ++ int free_pinned, ++ int allow_errors) ++{ ++ struct drm_device *dev = bo->dev; ++ int ret = 0; ++ ++ mutex_lock(&bo->mutex); ++ ++ ret = drm_bo_expire_fence(bo, allow_errors); ++ if (ret) ++ goto out; ++ ++ if (free_pinned) { ++ DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->pinned_lru); ++ if (bo->pinned_node == bo->mem.mm_node) ++ bo->pinned_node = NULL; ++ if (bo->pinned_node != NULL) { ++ drm_mm_put_block(bo->pinned_node); ++ bo->pinned_node = NULL; ++ } ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { ++ DRM_ERROR("A DRM_BO_NO_EVICT buffer 
present at " ++ "cleanup. Removing flag and evicting.\n"); ++ bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; ++ bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT; ++ } ++ ++ if (bo->mem.mem_type == mem_type) ++ ret = drm_bo_evict(bo, mem_type, 0); ++ ++ if (ret) { ++ if (allow_errors) { ++ goto out; ++ } else { ++ ret = 0; ++ DRM_ERROR("Cleanup eviction failed\n"); ++ } ++ } ++ ++out: ++ mutex_unlock(&bo->mutex); ++ return ret; ++} ++ ++ ++static struct drm_buffer_object *drm_bo_entry(struct list_head *list, ++ int pinned_list) ++{ ++ if (pinned_list) ++ return list_entry(list, struct drm_buffer_object, pinned_lru); ++ else ++ return list_entry(list, struct drm_buffer_object, lru); ++} ++ ++/* ++ * dev->struct_mutex locked. ++ */ ++ ++static int drm_bo_force_list_clean(struct drm_device *dev, ++ struct list_head *head, ++ unsigned mem_type, ++ int free_pinned, ++ int allow_errors, ++ int pinned_list) ++{ ++ struct list_head *list, *next, *prev; ++ struct drm_buffer_object *entry, *nentry; ++ int ret; ++ int do_restart; ++ ++ /* ++ * The list traversal is a bit odd here, because an item may ++ * disappear from the list when we release the struct_mutex or ++ * when we decrease the usage count. Also we're not guaranteed ++ * to drain pinned lists, so we can't always restart. ++ */ ++ ++restart: ++ nentry = NULL; ++ list_for_each_safe(list, next, head) { ++ prev = list->prev; ++ ++ entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list); ++ atomic_inc(&entry->usage); ++ if (nentry) { ++ atomic_dec(&nentry->usage); ++ nentry = NULL; ++ } ++ ++ /* ++ * Protect the next item from destruction, so we can check ++ * its list pointers later on. 
++ */ ++ ++ if (next != head) { ++ nentry = drm_bo_entry(next, pinned_list); ++ atomic_inc(&nentry->usage); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ ret = drm_bo_leave_list(entry, mem_type, free_pinned, ++ allow_errors); ++ mutex_lock(&dev->struct_mutex); ++ ++ drm_bo_usage_deref_locked(&entry); ++ if (ret) ++ return ret; ++ ++ /* ++ * Has the next item disappeared from the list? ++ */ ++ ++ do_restart = ((next->prev != list) && (next->prev != prev)); ++ ++ if (nentry != NULL && do_restart) ++ drm_bo_usage_deref_locked(&nentry); ++ ++ if (do_restart) ++ goto restart; ++ } ++ return 0; ++} ++ ++int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ int ret = -EINVAL; ++ ++ if (mem_type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory type %d\n", mem_type); ++ return ret; ++ } ++ ++ if (!man->has_type) { ++ DRM_ERROR("Trying to take down uninitialized " ++ "memory manager type %u\n", mem_type); ++ return ret; ++ } ++ ++ if ((man->kern_init_type) && (kern_clean == 0)) { ++ DRM_ERROR("Trying to take down kernel initialized " ++ "memory manager type %u\n", mem_type); ++ return -EPERM; ++ } ++ ++ man->use_type = 0; ++ man->has_type = 0; ++ ++ ret = 0; ++ if (mem_type > 0) { ++ BUG_ON(!list_empty(&bm->unfenced)); ++ drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); ++ drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); ++ ++ if (drm_mm_clean(&man->manager)) { ++ drm_mm_takedown(&man->manager); ++ } else { ++ ret = -EBUSY; ++ } ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_clean_mm); ++ ++/** ++ *Evict all buffers of a particular mem_type, but leave memory manager ++ *regions for NO_MOVE buffers intact. New buffers cannot be added at this ++ *point since we have the hardware lock. 
++ */ ++ ++int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type) ++{ ++ int ret; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ ++ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type); ++ return -EINVAL; ++ } ++ ++ if (!man->has_type) { ++ DRM_ERROR("Memory type %u has not been initialized.\n", ++ mem_type); ++ return 0; ++ } ++ ++ ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); ++ if (ret) ++ return ret; ++ ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); ++ ++ return ret; ++} ++ ++int drm_bo_init_mm(struct drm_device *dev, unsigned type, ++ unsigned long p_offset, unsigned long p_size, ++ int kern_init) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = -EINVAL; ++ struct drm_mem_type_manager *man; ++ ++ if (type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory type %d\n", type); ++ return ret; ++ } ++ ++ man = &bm->man[type]; ++ if (man->has_type) { ++ DRM_ERROR("Memory manager already initialized for type %d\n", ++ type); ++ return ret; ++ } ++ ++ ret = dev->driver->bo_driver->init_mem_type(dev, type, man); ++ if (ret) ++ return ret; ++ ++ ret = 0; ++ if (type != DRM_BO_MEM_LOCAL) { ++ if (!p_size) { ++ DRM_ERROR("Zero size memory manager type %d\n", type); ++ return ret; ++ } ++ ret = drm_mm_init(&man->manager, p_offset, p_size); ++ if (ret) ++ return ret; ++ } ++ man->has_type = 1; ++ man->use_type = 1; ++ man->kern_init_type = kern_init; ++ man->size = p_size; ++ ++ INIT_LIST_HEAD(&man->lru); ++ INIT_LIST_HEAD(&man->pinned); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_init_mm); ++ ++/* ++ * This function is intended to be called on drm driver unload. ++ * If you decide to call it from lastclose, you must protect the call ++ * from a potentially racing drm_bo_driver_init in firstopen. ++ * (This may happen on X server restart). 
++ */ ++ ++int drm_bo_driver_finish(struct drm_device *dev) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = 0; ++ unsigned i = DRM_BO_MEM_TYPES; ++ struct drm_mem_type_manager *man; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (!bm->initialized) ++ goto out; ++ bm->initialized = 0; ++ ++ while (i--) { ++ man = &bm->man[i]; ++ if (man->has_type) { ++ man->use_type = 0; ++ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) { ++ ret = -EBUSY; ++ DRM_ERROR("DRM memory manager type %d " ++ "is not clean.\n", i); ++ } ++ man->has_type = 0; ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!cancel_delayed_work(&bm->wq)) ++ flush_scheduled_work(); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_bo_delayed_delete(dev, 1); ++ if (list_empty(&bm->ddestroy)) ++ DRM_DEBUG("Delayed destroy list was clean\n"); ++ ++ if (list_empty(&bm->man[0].lru)) ++ DRM_DEBUG("Swap list was clean\n"); ++ ++ if (list_empty(&bm->man[0].pinned)) ++ DRM_DEBUG("NO_MOVE list was clean\n"); ++ ++ if (list_empty(&bm->unfenced)) ++ DRM_DEBUG("Unfenced list was clean\n"); ++ ++ if (bm->dummy_read_page) ++ __free_page(bm->dummy_read_page); ++ ++ drm_uncached_fini(); ++out: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_driver_finish); ++ ++/* ++ * This function is intended to be called on drm driver load. ++ * If you decide to call it from firstopen, you must protect the call ++ * from a potentially racing drm_bo_driver_finish in lastclose. ++ * (This may happen on X server restart). 
++ */ ++ ++int drm_bo_driver_init(struct drm_device *dev) ++{ ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = -EINVAL; ++ ++ drm_uncached_init(); ++ ++ bm->dummy_read_page = NULL; ++ mutex_lock(&dev->struct_mutex); ++ if (!driver) ++ goto out_unlock; ++ ++ bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); ++ if (!bm->dummy_read_page) { ++ ret = -ENOMEM; ++ goto out_unlock; ++ } ++ ++ /* ++ * Initialize the system memory buffer type. ++ * Other types need to be driver / IOCTL initialized. ++ */ ++ ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1); ++ if (ret) { ++ __free_page(bm->dummy_read_page); ++ bm->dummy_read_page = NULL; ++ goto out_unlock; ++ } ++ ++ INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue); ++ bm->initialized = 1; ++ bm->nice_mode = 1; ++ atomic_set(&bm->count, 0); ++ bm->cur_pages = 0; ++ INIT_LIST_HEAD(&bm->unfenced); ++ INIT_LIST_HEAD(&bm->ddestroy); ++out_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_driver_init); ++ ++/* ++ * buffer object vm functions. ++ */ ++ ++int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; ++ ++ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { ++ if (mem->mem_type == DRM_BO_MEM_LOCAL) ++ return 0; ++ ++ if (man->flags & _DRM_FLAG_MEMTYPE_CMA) ++ return 0; ++ ++ if (mem->flags & DRM_BO_FLAG_CACHED) ++ return 0; ++ } ++ return 1; ++} ++EXPORT_SYMBOL(drm_mem_reg_is_pci); ++ ++/** ++ * \c Get the PCI offset for the buffer object memory. ++ * ++ * \param bo The buffer object. ++ * \param bus_base On return the base of the PCI region ++ * \param bus_offset On return the byte offset into the PCI region ++ * \param bus_size On return the byte size of the buffer object or zero if ++ * the buffer object memory is not accessible through a PCI region. 
++ * \return Failure indication. ++ * ++ * Returns -EINVAL if the buffer object is currently not mappable. ++ * Otherwise returns zero. ++ */ ++ ++int drm_bo_pci_offset(struct drm_device *dev, ++ struct drm_bo_mem_reg *mem, ++ unsigned long *bus_base, ++ unsigned long *bus_offset, unsigned long *bus_size) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; ++ ++ *bus_size = 0; ++ if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) ++ return -EINVAL; ++ ++ if (drm_mem_reg_is_pci(dev, mem)) { ++ *bus_offset = mem->mm_node->start << PAGE_SHIFT; ++ *bus_size = mem->num_pages << PAGE_SHIFT; ++ *bus_base = man->io_offset; ++ } ++ ++ return 0; ++} ++ ++/** ++ * \c Kill all user-space virtual mappings of this buffer object. ++ * ++ * \param bo The buffer object. ++ * ++ * Call bo->mutex locked. ++ */ ++ ++void drm_bo_unmap_virtual(struct drm_buffer_object *bo) ++{ ++ struct drm_device *dev = bo->dev; ++ loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; ++ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; ++ ++ if (!dev->dev_mapping) ++ return; ++ ++ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); ++} ++ ++/** ++ * drm_bo_takedown_vm_locked: ++ * ++ * @bo: the buffer object to remove any drm device mapping ++ * ++ * Remove any associated vm mapping on the drm device node that ++ * would have been created for a drm_bo_type_device buffer ++ */ ++void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo) ++{ ++ struct drm_map_list *list; ++ drm_local_map_t *map; ++ struct drm_device *dev = bo->dev; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ if (bo->type != drm_bo_type_device) ++ return; ++ ++ list = &bo->map_list; ++ if (list->user_token) { ++ drm_ht_remove_item(&dev->map_hash, &list->hash); ++ list->user_token = 0; ++ } ++ if (list->file_offset_node) { ++ drm_mm_put_block(list->file_offset_node); ++ list->file_offset_node = NULL; ++ } ++ ++ map = list->map; ++ if (!map) ++ 
return; ++ ++ drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); ++ list->map = NULL; ++ list->user_token = 0ULL; ++ drm_bo_usage_deref_locked(&bo); ++} ++EXPORT_SYMBOL(drm_bo_takedown_vm_locked); ++ ++/** ++ * drm_bo_setup_vm_locked: ++ * ++ * @bo: the buffer to allocate address space for ++ * ++ * Allocate address space in the drm device so that applications ++ * can mmap the buffer and access the contents. This only ++ * applies to drm_bo_type_device objects as others are not ++ * placed in the drm device address space. ++ */ ++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo) ++{ ++ struct drm_map_list *list = &bo->map_list; ++ drm_local_map_t *map; ++ struct drm_device *dev = bo->dev; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); ++ if (!list->map) ++ return -ENOMEM; ++ ++ map = list->map; ++ map->offset = 0; ++ map->type = _DRM_TTM; ++ map->flags = _DRM_REMOVABLE; ++ map->size = bo->mem.num_pages * PAGE_SIZE; ++ atomic_inc(&bo->usage); ++ map->handle = (void *)bo; ++ ++ list->file_offset_node = drm_mm_search_free(&dev->offset_manager, ++ bo->mem.num_pages, 0, 0); ++ ++ if (unlikely(!list->file_offset_node)) { ++ drm_bo_takedown_vm_locked(bo); ++ return -ENOMEM; ++ } ++ ++ list->file_offset_node = drm_mm_get_block(list->file_offset_node, ++ bo->mem.num_pages, 0); ++ ++ if (unlikely(!list->file_offset_node)) { ++ drm_bo_takedown_vm_locked(bo); ++ return -ENOMEM; ++ } ++ ++ list->hash.key = list->file_offset_node->start; ++ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { ++ drm_bo_takedown_vm_locked(bo); ++ return -ENOMEM; ++ } ++ ++ list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT; ++ ++ return 0; ++} ++ ++/* used to EVICT VRAM lru at suspend time */ ++void drm_bo_evict_mm(struct drm_device *dev, int mem_type, int no_wait) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ struct drm_buffer_object *entry; ++ /* 
we need to migrate all objects in VRAM */ ++ struct list_head *lru; ++ int ret; ++ /* evict all buffers on the LRU - won't evict pinned buffers */ ++ ++ mutex_lock(&dev->struct_mutex); ++ do { ++ lru = &man->lru; ++ ++redo: ++ if (lru->next == &man->lru) { ++ DRM_ERROR("lru empty\n"); ++ break; ++ } ++ ++ entry = list_entry(lru->next, struct drm_buffer_object, lru); ++ ++ // if (entry->mem.flags & DRM_BO_FLAG_DISCARDABLE) { ++ // lru = lru->next; ++ // goto redo; ++ // } ++ ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&entry->mutex); ++ ++ ret = drm_bo_evict(entry, mem_type, no_wait); ++ mutex_unlock(&entry->mutex); ++ ++ if (ret) ++ DRM_ERROR("Evict failed for BO\n"); ++ ++ mutex_lock(&entry->mutex); ++ (void)drm_bo_expire_fence(entry, 0); ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_unlocked(&entry); ++ ++ mutex_lock(&dev->struct_mutex); ++ } while(1); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++} ++EXPORT_SYMBOL(drm_bo_evict_mm); +diff --git a/drivers/gpu/drm/drm_bo_move.c b/drivers/gpu/drm/drm_bo_move.c +new file mode 100644 +index 0000000..abeab6a +--- /dev/null ++++ b/drivers/gpu/drm/drm_bo_move.c +@@ -0,0 +1,709 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++#if defined(CONFIG_X86) ++#include ++#endif ++ ++/** ++ * Free the old memory node unless it's a pinned region and we ++ * have not been requested to free also pinned regions. 
++ */ ++ ++static void drm_bo_free_old_node(struct drm_buffer_object *bo) ++{ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ ++ if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { ++ mutex_lock(&bo->dev->struct_mutex); ++ drm_mm_put_block(old_mem->mm_node); ++ mutex_unlock(&bo->dev->struct_mutex); ++ } ++ old_mem->mm_node = NULL; ++} ++ ++int drm_bo_move_ttm(struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_ttm *ttm = bo->ttm; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ int ret; ++ ++ if (old_mem->mem_type != DRM_BO_MEM_LOCAL) { ++ if (evict) ++ drm_ttm_evict(ttm); ++ else ++ drm_ttm_unbind(ttm); ++ ++ drm_bo_free_old_node(bo); ++ DRM_FLAG_MASKED(old_mem->flags, ++ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | ++ DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); ++ old_mem->mem_type = DRM_BO_MEM_LOCAL; ++ save_flags = old_mem->flags; ++ } ++ if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { ++ ret = drm_ttm_bind(ttm, new_mem); ++ if (ret) ++ return ret; ++ } ++ ++ *old_mem = *new_mem; ++ new_mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_move_ttm); ++ ++/** ++ * \c Return a kernel virtual address to the buffer object PCI memory. ++ * ++ * \param bo The buffer object. ++ * \return Failure indication. ++ * ++ * Returns -EINVAL if the buffer object is currently not mappable. ++ * Returns -ENOMEM if the ioremap operation failed. ++ * Otherwise returns zero. ++ * ++ * After a successfull call, bo->iomap contains the virtual address, or NULL ++ * if the buffer object content is not accessible through PCI space. ++ * Call bo->mutex locked. 
++ */ ++ ++int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem, ++ void **virtual) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ unsigned long bus_base; ++ int ret; ++ void *addr; ++ ++ *virtual = NULL; ++ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size); ++ if (ret || bus_size == 0) ++ return ret; ++ ++ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) ++ addr = (void *)(((u8 *) man->io_addr) + bus_offset); ++ else { ++ addr = ioremap_nocache(bus_base + bus_offset, bus_size); ++ if (!addr) ++ return -ENOMEM; ++ } ++ *virtual = addr; ++ return 0; ++} ++EXPORT_SYMBOL(drm_mem_reg_ioremap); ++ ++/** ++ * \c Unmap mapping obtained using drm_bo_ioremap ++ * ++ * \param bo The buffer object. ++ * ++ * Call bo->mutex locked. ++ */ ++ ++void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem, ++ void *virtual) ++{ ++ struct drm_buffer_manager *bm; ++ struct drm_mem_type_manager *man; ++ ++ bm = &dev->bm; ++ man = &bm->man[mem->mem_type]; ++ ++ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) ++ iounmap(virtual); ++} ++EXPORT_SYMBOL(drm_mem_reg_iounmap); ++ ++static int drm_copy_io_page(void *dst, void *src, unsigned long page) ++{ ++ uint32_t *dstP = ++ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT)); ++ uint32_t *srcP = ++ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT)); ++ ++ int i; ++ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i) ++ iowrite32(ioread32(srcP++), dstP++); ++ return 0; ++} ++ ++static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src, ++ unsigned long page) ++{ ++ struct page *d = drm_ttm_get_page(ttm, page); ++ void *dst; ++ ++ if (!d) ++ return -ENOMEM; ++ ++ src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); ++ dst = kmap(d); ++ if (!dst) ++ return -ENOMEM; ++ ++ memcpy_fromio(dst, src, PAGE_SIZE); ++ kunmap(d); ++ return 0; ++} ++ 
++static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page) ++{ ++ struct page *s = drm_ttm_get_page(ttm, page); ++ void *src; ++ ++ if (!s) ++ return -ENOMEM; ++ ++ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); ++ src = kmap(s); ++ if (!src) ++ return -ENOMEM; ++ ++ memcpy_toio(dst, src, PAGE_SIZE); ++ kunmap(s); ++ return 0; ++} ++ ++int drm_bo_move_memcpy(struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; ++ struct drm_ttm *ttm = bo->ttm; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ struct drm_bo_mem_reg old_copy = *old_mem; ++ void *old_iomap; ++ void *new_iomap; ++ int ret; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ unsigned long i; ++ unsigned long page; ++ unsigned long add = 0; ++ int dir; ++ ++ ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); ++ if (ret) ++ return ret; ++ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap); ++ if (ret) ++ goto out; ++ ++ if (old_iomap == NULL && new_iomap == NULL) ++ goto out2; ++ if (old_iomap == NULL && ttm == NULL) ++ goto out2; ++ ++ add = 0; ++ dir = 1; ++ ++ if ((old_mem->mem_type == new_mem->mem_type) && ++ (new_mem->mm_node->start < ++ old_mem->mm_node->start + old_mem->mm_node->size)) { ++ dir = -1; ++ add = new_mem->num_pages - 1; ++ } ++ ++ for (i = 0; i < new_mem->num_pages; ++i) { ++ page = i * dir + add; ++ if (old_iomap == NULL) ++ ret = drm_copy_ttm_io_page(ttm, new_iomap, page); ++ else if (new_iomap == NULL) ++ ret = drm_copy_io_ttm_page(ttm, old_iomap, page); ++ else ++ ret = drm_copy_io_page(new_iomap, old_iomap, page); ++ if (ret) ++ goto out1; ++ } ++ mb(); ++out2: ++ drm_bo_free_old_node(bo); ++ ++ *old_mem = *new_mem; ++ new_mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, new_mem->flags, 
DRM_BO_MASK_MEMTYPE); ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) { ++ drm_ttm_unbind(ttm); ++ drm_ttm_destroy(ttm); ++ bo->ttm = NULL; ++ } ++ ++out1: ++ drm_mem_reg_iounmap(dev, new_mem, new_iomap); ++out: ++ drm_mem_reg_iounmap(dev, &old_copy, old_iomap); ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_move_memcpy); ++ ++static int drm_memset_io_page(void *dst, unsigned long page) ++{ ++ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); ++ memset_io(dst, 0, PAGE_SIZE); ++ return 0; ++} ++ ++static int drm_memset_ttm_page(struct drm_ttm *ttm, unsigned long page) ++{ ++ struct page *d = drm_ttm_get_page(ttm, page); ++ void *dst; ++ ++ dst = kmap(d); ++ if (!dst) ++ return -ENOMEM; ++ ++ memset_io(dst, 0, PAGE_SIZE); ++ kunmap(d); ++ return 0; ++} ++ ++int drm_bo_move_zero(struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; ++ struct drm_ttm *ttm = bo->ttm; ++ void *new_iomap; ++ int ret; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ unsigned long i; ++ unsigned long page; ++ ++ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap); ++ if (ret) ++ goto out; ++ ++ if (new_iomap == NULL && ttm == NULL) ++ goto out2; ++ ++ for (i = 0; i < new_mem->num_pages; ++i) { ++ if (new_iomap == NULL) ++ ret = drm_memset_ttm_page(ttm, i); ++ else ++ ret = drm_memset_io_page(new_iomap, i); ++ if (ret) ++ goto out1; ++ } ++ mb(); ++out2: ++ drm_bo_free_old_node(bo); ++ ++ *old_mem = *new_mem; ++ new_mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) { ++ drm_ttm_unbind(ttm); ++ drm_ttm_destroy(ttm); ++ bo->ttm = NULL; ++ } ++out1: ++ 
drm_mem_reg_iounmap(dev, new_mem, new_iomap); ++out: ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_move_zero); ++ ++/* ++ * Transfer a buffer object's memory and LRU status to a newly ++ * created object. User-space references remains with the old ++ * object. Call bo->mutex locked. ++ */ ++ ++int drm_buffer_object_transfer(struct drm_buffer_object *bo, ++ struct drm_buffer_object **new_obj) ++{ ++ struct drm_buffer_object *fbo; ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); ++ if (!fbo) ++ return -ENOMEM; ++ ++ *fbo = *bo; ++ mutex_init(&fbo->mutex); ++ mutex_lock(&fbo->mutex); ++ mutex_lock(&dev->struct_mutex); ++ ++ DRM_INIT_WAITQUEUE(&bo->event_queue); ++ INIT_LIST_HEAD(&fbo->ddestroy); ++ INIT_LIST_HEAD(&fbo->lru); ++ INIT_LIST_HEAD(&fbo->pinned_lru); ++#ifdef DRM_ODD_MM_COMPAT ++ INIT_LIST_HEAD(&fbo->vma_list); ++ INIT_LIST_HEAD(&fbo->p_mm_list); ++#endif ++ ++ fbo->fence = drm_fence_reference_locked(bo->fence); ++ fbo->pinned_node = NULL; ++ fbo->mem.mm_node->private = (void *)fbo; ++ atomic_set(&fbo->usage, 1); ++ atomic_inc(&bm->count); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_unlock(&fbo->mutex); ++ ++ *new_obj = fbo; ++ return 0; ++} ++ ++/* ++ * Since move is underway, we need to block signals in this function. ++ * We cannot restart until it has finished. 
++ */ ++ ++int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, ++ int evict, int no_wait, uint32_t fence_class, ++ uint32_t fence_type, uint32_t fence_flags, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ int ret; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ struct drm_buffer_object *old_obj; ++ ++ if (bo->fence) ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ ret = drm_fence_object_create(dev, fence_class, fence_type, ++ fence_flags | DRM_FENCE_FLAG_EMIT, ++ &bo->fence); ++ bo->fence_type = fence_type; ++ if (ret) ++ return ret; ++ ++#ifdef DRM_ODD_MM_COMPAT ++ /* ++ * In this mode, we don't allow pipelining a copy blit, ++ * since the buffer will be accessible from user space ++ * the moment we return and rebuild the page tables. ++ * ++ * With normal vm operation, page tables are rebuilt ++ * on demand using fault(), which waits for buffer idle. ++ */ ++ if (1) ++#else ++ if (evict || ((bo->mem.mm_node == bo->pinned_node) && ++ bo->mem.mm_node != NULL)) ++#endif ++ { ++ if (bo->fence) { ++ (void) drm_fence_object_wait(bo->fence, 0, 1, ++ bo->fence_type); ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ } ++ drm_bo_free_old_node(bo); ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) { ++ drm_ttm_unbind(bo->ttm); ++ drm_ttm_destroy(bo->ttm); ++ bo->ttm = NULL; ++ } ++ } else { ++ ++ /* This should help pipeline ordinary buffer moves. ++ * ++ * Hang old buffer memory on a new buffer object, ++ * and leave it to be released when the GPU ++ * operation has completed. 
++ */ ++ ++ ret = drm_buffer_object_transfer(bo, &old_obj); ++ ++ if (ret) ++ return ret; ++ ++ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) ++ old_obj->ttm = NULL; ++ else ++ bo->ttm = NULL; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&old_obj->lru); ++ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); ++ drm_bo_add_to_lru(old_obj); ++ ++ drm_bo_usage_deref_locked(&old_obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ } ++ ++ *old_mem = *new_mem; ++ new_mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_move_accel_cleanup); ++ ++int drm_bo_same_page(unsigned long offset, ++ unsigned long offset2) ++{ ++ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK); ++} ++EXPORT_SYMBOL(drm_bo_same_page); ++ ++unsigned long drm_bo_offset_end(unsigned long offset, ++ unsigned long end) ++{ ++ offset = (offset + PAGE_SIZE) & PAGE_MASK; ++ return (end < offset) ? 
end : offset; ++} ++EXPORT_SYMBOL(drm_bo_offset_end); ++ ++static pgprot_t drm_kernel_io_prot(uint32_t map_type) ++{ ++ pgprot_t tmp = PAGE_KERNEL; ++ ++#if defined(__i386__) || defined(__x86_64__) ++ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { ++ pgprot_val(tmp) |= _PAGE_PCD; ++ pgprot_val(tmp) &= ~_PAGE_PWT; ++#if defined(CONFIG_X86_PAT) ++ /* for a scatter gather backed map, use ++ WC page bits */ ++ if (((map_type == _DRM_FRAME_BUFFER) || (map_type == _DRM_SCATTER_GATHER)) && pat_enabled) ++ tmp = PAGE_KERNEL_WC; ++#endif ++ } ++#elif defined(__powerpc__) ++ pgprot_val(tmp) |= _PAGE_NO_CACHE; ++ if (map_type == _DRM_REGISTERS) ++ pgprot_val(tmp) |= _PAGE_GUARDED; ++#endif ++#if defined(__ia64__) ++ if (map_type == _DRM_TTM) ++ tmp = pgprot_writecombine(tmp); ++ else ++ tmp = pgprot_noncached(tmp); ++#endif ++ return tmp; ++} ++ ++static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base, ++ unsigned long bus_offset, unsigned long bus_size, ++ struct drm_bo_kmap_obj *map) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg *mem = &bo->mem; ++ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; ++ ++ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { ++ map->bo_kmap_type = bo_map_premapped; ++ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); ++ } else { ++ map->bo_kmap_type = bo_map_iomap; ++ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size); ++ } ++ return (!map->virtual) ? 
-ENOMEM : 0; ++} ++ ++static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, ++ unsigned long start_page, unsigned long num_pages, ++ struct drm_bo_kmap_obj *map) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg *mem = &bo->mem; ++ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; ++ pgprot_t prot; ++ struct drm_ttm *ttm = bo->ttm; ++ struct page *d; ++ int i; ++ ++ BUG_ON(!ttm); ++ ++ if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) { ++ ++ /* ++ * We're mapping a single page, and the desired ++ * page protection is consistent with the bo. ++ */ ++ ++ map->bo_kmap_type = bo_map_kmap; ++ map->page = drm_ttm_get_page(ttm, start_page); ++ map->virtual = kmap(map->page); ++ } else { ++ /* ++ * Populate the part we're mapping; ++ */ ++ ++ for (i = start_page; i < start_page + num_pages; ++i) { ++ d = drm_ttm_get_page(ttm, i); ++ if (!d) ++ return -ENOMEM; ++ } ++ ++ /* ++ * We need to use vmap to get the desired page protection ++ * or to make the buffer object look contigous. ++ */ ++ ++ prot = (mem->flags & DRM_BO_FLAG_CACHED) ? ++ PAGE_KERNEL : ++ drm_kernel_io_prot(man->drm_bus_maptype); ++ map->bo_kmap_type = bo_map_vmap; ++ map->virtual = vmap(ttm->pages + start_page, ++ num_pages, 0, prot); ++ } ++ return (!map->virtual) ? -ENOMEM : 0; ++} ++ ++/* ++ * This function is to be used for kernel mapping of buffer objects. ++ * It chooses the appropriate mapping method depending on the memory type ++ * and caching policy the buffer currently has. ++ * Mapping multiple pages or buffers that live in io memory is a bit slow and ++ * consumes vmalloc space. Be restrictive with such mappings. ++ * Mapping single pages usually returns the logical kernel address, ++ * (which is fast) ++ * BUG may use slower temporary mappings for high memory pages or ++ * uncached / write-combined pages. ++ * ++ * The function fills in a drm_bo_kmap_obj which can be used to return the ++ * kernel virtual address of the buffer. 
++ * ++ * Code servicing a non-priviliged user request is only allowed to map one ++ * page at a time. We might need to implement a better scheme to stop such ++ * processes from consuming all vmalloc space. ++ */ ++ ++int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, ++ unsigned long num_pages, struct drm_bo_kmap_obj *map) ++{ ++ int ret; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ ++ map->virtual = NULL; ++ ++ if (num_pages > bo->num_pages) ++ return -EINVAL; ++ if (start_page > bo->num_pages) ++ return -EINVAL; ++#if 0 ++ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) ++ return -EPERM; ++#endif ++ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, ++ &bus_offset, &bus_size); ++ ++ if (ret) ++ return ret; ++ ++ /* clear the clean flags */ ++ bo->mem.flags &= ~DRM_BO_FLAG_CLEAN; ++ bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN; ++ ++ if (bus_size == 0) { ++ return drm_bo_kmap_ttm(bo, start_page, num_pages, map); ++ } else { ++ bus_offset += start_page << PAGE_SHIFT; ++ bus_size = num_pages << PAGE_SHIFT; ++ return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); ++ } ++} ++EXPORT_SYMBOL(drm_bo_kmap); ++ ++void drm_bo_kunmap(struct drm_bo_kmap_obj *map) ++{ ++ if (!map->virtual) ++ return; ++ ++ switch (map->bo_kmap_type) { ++ case bo_map_iomap: ++ iounmap(map->virtual); ++ break; ++ case bo_map_vmap: ++ vunmap(map->virtual); ++ break; ++ case bo_map_kmap: ++ kunmap(map->page); ++ break; ++ case bo_map_premapped: ++ break; ++ default: ++ BUG(); ++ } ++ map->virtual = NULL; ++ map->page = NULL; ++} ++EXPORT_SYMBOL(drm_bo_kunmap); ++ ++int drm_bo_pfn_prot(struct drm_buffer_object *bo, ++ unsigned long dst_offset, ++ unsigned long *pfn, ++ pgprot_t *prot) ++{ ++ struct drm_bo_mem_reg *mem = &bo->mem; ++ struct drm_device *dev = bo->dev; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ unsigned long bus_base; ++ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; ++ int ret; 
++ ++ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, ++ &bus_size); ++ if (ret) ++ return -EINVAL; ++ ++ if (bus_size != 0) ++ *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT; ++ else if (!bo->ttm) ++ return -EINVAL; ++ else ++ *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT)); ++ ++ *prot = (mem->flags & DRM_BO_FLAG_CACHED) ? ++ PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_pfn_prot); ++ +diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c +index 72c667f..19039d8 100644 +--- a/drivers/gpu/drm/drm_bufs.c ++++ b/drivers/gpu/drm/drm_bufs.c +@@ -210,7 +210,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, + map->offset = (unsigned long)map->handle; + if (map->flags & _DRM_CONTAINS_LOCK) { + /* Prevent a 2nd X Server from creating a 2nd lock */ +- if (dev->primary->master->lock.hw_lock != NULL) { ++ if (dev->primary->master->lock.hw_lock != &dev->default_lock) { + vfree(map->handle); + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EBUSY; +@@ -417,8 +417,8 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) + vfree(map->handle); + if (master) { + if (dev->sigdata.lock == master->lock.hw_lock) +- dev->sigdata.lock = NULL; +- master->lock.hw_lock = NULL; /* SHM removed */ ++ dev->sigdata.lock = &dev->default_lock; ++ master->lock.hw_lock = &dev->default_lock; + master->lock.file_priv = NULL; + wake_up_interruptible(&master->lock.lock_queue); + } +@@ -435,6 +435,9 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) + case _DRM_GEM: + DRM_ERROR("tried to rmmap GEM object\n"); + break; ++ case _DRM_TTM: ++ BUG_ON(1); ++ break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + +@@ -1528,6 +1531,7 @@ int drm_mapbufs(struct drm_device *dev, void *data, + dev->buf_use++; /* Can't allocate more after this call */ + spin_unlock(&dev->count_lock); + ++ DRM_DEBUG("dma buf count %d, req count %d\n", 
request->count, dma->buf_count); + if (request->count >= dma->buf_count) { + if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) + || (drm_core_check_feature(dev, DRIVER_SG) +@@ -1538,6 +1542,7 @@ int drm_mapbufs(struct drm_device *dev, void *data, + unsigned long token = dev->agp_buffer_token; + + if (!map) { ++ DRM_DEBUG("No map\n"); + retcode = -EINVAL; + goto done; + } +@@ -1555,6 +1560,7 @@ int drm_mapbufs(struct drm_device *dev, void *data, + up_write(¤t->mm->mmap_sem); + } + if (virtual > -1024UL) { ++ DRM_DEBUG("mmap failed\n"); + /* Real error */ + retcode = (signed long)virtual; + goto done; +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index 53c8725..b45c867 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -1507,7 +1507,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, + set.mode = mode; + set.connectors = connector_set; + set.num_connectors = crtc_req->count_connectors; +- set.fb =fb; ++ set.fb = fb; + ret = crtc->funcs->set_config(&set); + + out: +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c +index 58e3359..d0ec77c 100644 +--- a/drivers/gpu/drm/drm_crtc_helper.c ++++ b/drivers/gpu/drm/drm_crtc_helper.c +@@ -704,6 +704,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) + if (set->crtc->fb != set->fb) + set->crtc->fb = set->fb; + crtc_funcs->mode_set_base(set->crtc, set->x, set->y); ++ set->crtc->x = set->x; ++ set->crtc->y = set->y; + } + + kfree(save_encoders); +@@ -818,3 +820,30 @@ int drm_helper_resume_force_mode(struct drm_device *dev) + return 0; + } + EXPORT_SYMBOL(drm_helper_resume_force_mode); ++ ++void drm_helper_set_connector_dpms(struct drm_connector *connector, ++ int dpms_mode) ++{ ++ int i = 0; ++ struct drm_encoder *encoder; ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ struct drm_mode_object *obj; ++ ++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { ++ if (connector->encoder_ids[i] == 0) ++ break; ++ 
++ obj = drm_mode_object_find(connector->dev, ++ connector->encoder_ids[i], ++ DRM_MODE_OBJECT_ENCODER); ++ if (!obj) ++ continue; ++ ++ encoder = obj_to_encoder(obj); ++ encoder_funcs = encoder->helper_private; ++ if (encoder_funcs->dpms) ++ encoder_funcs->dpms(encoder, dpms_mode); ++ ++ } ++} ++EXPORT_SYMBOL(drm_helper_set_connector_dpms); +diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c +index 7a8e2fb..0b2df71 100644 +--- a/drivers/gpu/drm/drm_dma.c ++++ b/drivers/gpu/drm/drm_dma.c +@@ -58,6 +58,7 @@ int drm_dma_setup(struct drm_device *dev) + + return 0; + } ++EXPORT_SYMBOL(drm_dma_setup); + + /** + * Cleanup the DMA resources. +@@ -120,6 +121,7 @@ void drm_dma_takedown(struct drm_device *dev) + drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER); + dev->dma = NULL; + } ++EXPORT_SYMBOL(drm_dma_takedown); + + /** + * Free a buffer. +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c +index 373e3de..63e8c1f 100644 +--- a/drivers/gpu/drm/drm_drv.c ++++ b/drivers/gpu/drm/drm_drv.c +@@ -165,9 +165,13 @@ int drm_lastclose(struct drm_device * dev) + + if (dev->driver->lastclose) + dev->driver->lastclose(dev); ++ + DRM_DEBUG("driver lastclose completed\n"); + + if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET)) ++ drm_bo_driver_finish(dev); ++ ++ if (dev->irq_enabled) + drm_irq_uninstall(dev); + + mutex_lock(&dev->struct_mutex); +@@ -316,14 +320,14 @@ static void drm_cleanup(struct drm_device * dev) + DRM_DEBUG("mtrr_del=%d\n", retval); + } + ++ if (dev->driver->unload) ++ dev->driver->unload(dev); ++ + if (drm_core_has_AGP(dev) && dev->agp) { + drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + dev->agp = NULL; + } + +- if (dev->driver->unload) +- dev->driver->unload(dev); +- + drm_ht_remove(&dev->map_hash); + drm_ctxbitmap_cleanup(dev); + +@@ -360,9 +364,34 @@ static const struct file_operations drm_stub_fops = { + static int __init drm_core_init(void) + { + int ret = -ENOMEM; ++ struct sysinfo si; ++ 
unsigned long avail_memctl_mem; ++ unsigned long max_memctl_mem; + + idr_init(&drm_minors_idr); + ++ si_meminfo(&si); ++ ++ /* ++ * AGP only allows low / DMA32 memory ATM. ++ */ ++ ++ avail_memctl_mem = si.totalram - si.totalhigh; ++ ++ /* ++ * Avoid overflows ++ */ ++ ++ max_memctl_mem = 1UL << (32 - PAGE_SHIFT); ++ max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE; ++ ++ if (avail_memctl_mem >= max_memctl_mem) ++ avail_memctl_mem = max_memctl_mem; ++ ++ drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit); ++ ++ ret = -ENOMEM; ++ + if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) + goto err_p1; + +diff --git a/drivers/gpu/drm/drm_fence.c b/drivers/gpu/drm/drm_fence.c +new file mode 100644 +index 0000000..f1c386c +--- /dev/null ++++ b/drivers/gpu/drm/drm_fence.c +@@ -0,0 +1,540 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++ ++/* ++ * Convenience function to be called by fence::wait methods that ++ * need polling. ++ */ ++ ++int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy, ++ int interruptible, uint32_t mask, ++ unsigned long end_jiffies) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ uint32_t count = 0; ++ int ret; ++ ++ DECLARE_WAITQUEUE(entry, current); ++ add_wait_queue(&fc->fence_queue, &entry); ++ ++ ret = 0; ++ ++ for (;;) { ++ __set_current_state((interruptible) ? ++ TASK_INTERRUPTIBLE : ++ TASK_UNINTERRUPTIBLE); ++ if (drm_fence_object_signaled(fence, mask)) ++ break; ++ if (time_after_eq(jiffies, end_jiffies)) { ++ ret = -EBUSY; ++ break; ++ } ++ if (lazy) ++ schedule_timeout(1); ++ else if ((++count & 0x0F) == 0){ ++ __set_current_state(TASK_RUNNING); ++ schedule(); ++ __set_current_state((interruptible) ? ++ TASK_INTERRUPTIBLE : ++ TASK_UNINTERRUPTIBLE); ++ } ++ if (interruptible && signal_pending(current)) { ++ ret = -EAGAIN; ++ break; ++ } ++ } ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&fc->fence_queue, &entry); ++ return ret; ++} ++EXPORT_SYMBOL(drm_fence_wait_polling); ++ ++/* ++ * Typically called by the IRQ handler. 
++ */ ++ ++void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, ++ uint32_t sequence, uint32_t type, uint32_t error) ++{ ++ int wake = 0; ++ uint32_t diff; ++ uint32_t relevant_type; ++ uint32_t new_type; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ struct list_head *head; ++ struct drm_fence_object *fence, *next; ++ int found = 0; ++ ++ if (list_empty(&fc->ring)) ++ return; ++ ++ list_for_each_entry(fence, &fc->ring, ring) { ++ diff = (sequence - fence->sequence) & driver->sequence_mask; ++ if (diff > driver->wrap_diff) { ++ found = 1; ++ break; ++ } ++ } ++ ++ fc->waiting_types &= ~type; ++ head = (found) ? &fence->ring : &fc->ring; ++ ++ list_for_each_entry_safe_reverse(fence, next, head, ring) { ++ if (&fence->ring == &fc->ring) ++ break; ++ ++ if (error) { ++ fence->error = error; ++ fence->signaled_types = fence->type; ++ list_del_init(&fence->ring); ++ wake = 1; ++ break; ++ } ++ ++ if (type & DRM_FENCE_TYPE_EXE) ++ type |= fence->native_types; ++ ++ relevant_type = type & fence->type; ++ new_type = (fence->signaled_types | relevant_type) ^ ++ fence->signaled_types; ++ ++ if (new_type) { ++ fence->signaled_types |= new_type; ++ DRM_DEBUG("Fence %p signaled 0x%08x\n", ++ fence, fence->signaled_types); ++ ++ if (driver->needed_flush) ++ fc->pending_flush |= driver->needed_flush(fence); ++ ++ if (new_type & fence->waiting_types) ++ wake = 1; ++ } ++ ++ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types; ++ ++ if (!(fence->type & ~fence->signaled_types)) { ++ DRM_DEBUG("Fence completely signaled %p\n", ++ fence); ++ list_del_init(&fence->ring); ++ } ++ } ++ ++ /* ++ * Reinstate lost waiting types. 
++ */ ++ ++ if ((fc->waiting_types & type) != type) { ++ head = head->prev; ++ list_for_each_entry(fence, head, ring) { ++ if (&fence->ring == &fc->ring) ++ break; ++ diff = (fc->highest_waiting_sequence - fence->sequence) & ++ driver->sequence_mask; ++ if (diff > driver->wrap_diff) ++ break; ++ ++ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types; ++ } ++ } ++ ++ if (wake) ++ wake_up_all(&fc->fence_queue); ++} ++EXPORT_SYMBOL(drm_fence_handler); ++ ++static void drm_fence_unring(struct drm_device *dev, struct list_head *ring) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ unsigned long flags; ++ ++ write_lock_irqsave(&fm->lock, flags); ++ list_del_init(ring); ++ write_unlock_irqrestore(&fm->lock, flags); ++} ++ ++void drm_fence_usage_deref_locked(struct drm_fence_object **fence) ++{ ++ struct drm_fence_object *tmp_fence = *fence; ++ struct drm_device *dev = tmp_fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ *fence = NULL; ++ if (atomic_dec_and_test(&tmp_fence->usage)) { ++ drm_fence_unring(dev, &tmp_fence->ring); ++ DRM_DEBUG("Destroyed a fence object %p\n", ++ tmp_fence); ++ atomic_dec(&fm->count); ++ drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); ++ } ++} ++EXPORT_SYMBOL(drm_fence_usage_deref_locked); ++ ++void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence) ++{ ++ struct drm_fence_object *tmp_fence = *fence; ++ struct drm_device *dev = tmp_fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ *fence = NULL; ++ if (atomic_dec_and_test(&tmp_fence->usage)) { ++ mutex_lock(&dev->struct_mutex); ++ if (atomic_read(&tmp_fence->usage) == 0) { ++ drm_fence_unring(dev, &tmp_fence->ring); ++ atomic_dec(&fm->count); ++ drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ } ++} ++EXPORT_SYMBOL(drm_fence_usage_deref_unlocked); ++ ++struct drm_fence_object ++*drm_fence_reference_locked(struct drm_fence_object 
*src) ++{ ++ DRM_ASSERT_LOCKED(&src->dev->struct_mutex); ++ ++ atomic_inc(&src->usage); ++ return src; ++} ++ ++void drm_fence_reference_unlocked(struct drm_fence_object **dst, ++ struct drm_fence_object *src) ++{ ++ mutex_lock(&src->dev->struct_mutex); ++ *dst = src; ++ atomic_inc(&src->usage); ++ mutex_unlock(&src->dev->struct_mutex); ++} ++EXPORT_SYMBOL(drm_fence_reference_unlocked); ++ ++int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask) ++{ ++ unsigned long flags; ++ int signaled; ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ ++ mask &= fence->type; ++ read_lock_irqsave(&fm->lock, flags); ++ signaled = (mask & fence->signaled_types) == mask; ++ read_unlock_irqrestore(&fm->lock, flags); ++ if (!signaled && driver->poll) { ++ write_lock_irqsave(&fm->lock, flags); ++ driver->poll(dev, fence->fence_class, mask); ++ signaled = (mask & fence->signaled_types) == mask; ++ write_unlock_irqrestore(&fm->lock, flags); ++ } ++ return signaled; ++} ++EXPORT_SYMBOL(drm_fence_object_signaled); ++ ++ ++int drm_fence_object_flush(struct drm_fence_object *fence, ++ uint32_t type) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ unsigned long irq_flags; ++ uint32_t saved_pending_flush; ++ uint32_t diff; ++ int call_flush; ++ ++ if (type & ~fence->type) { ++ DRM_ERROR("Flush trying to extend fence type, " ++ "0x%x, 0x%x\n", type, fence->type); ++ return -EINVAL; ++ } ++ ++ write_lock_irqsave(&fm->lock, irq_flags); ++ fence->waiting_types |= type; ++ fc->waiting_types |= fence->waiting_types; ++ diff = (fence->sequence - fc->highest_waiting_sequence) & ++ driver->sequence_mask; ++ ++ if (diff < driver->wrap_diff) ++ fc->highest_waiting_sequence = fence->sequence; ++ ++ /* 
++ * fence->waiting_types has changed. Determine whether ++ * we need to initiate some kind of flush as a result of this. ++ */ ++ ++ saved_pending_flush = fc->pending_flush; ++ if (driver->needed_flush) ++ fc->pending_flush |= driver->needed_flush(fence); ++ ++ if (driver->poll) ++ driver->poll(dev, fence->fence_class, fence->waiting_types); ++ ++ call_flush = fc->pending_flush; ++ write_unlock_irqrestore(&fm->lock, irq_flags); ++ ++ if (call_flush && driver->flush) ++ driver->flush(dev, fence->fence_class); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_fence_object_flush); ++ ++/* ++ * Make sure old fence objects are signaled before their fence sequences are ++ * wrapped around and reused. ++ */ ++ ++void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, ++ uint32_t sequence) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; ++ struct drm_fence_object *fence; ++ unsigned long irq_flags; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ int call_flush; ++ ++ uint32_t diff; ++ ++ write_lock_irqsave(&fm->lock, irq_flags); ++ ++ list_for_each_entry_reverse(fence, &fc->ring, ring) { ++ diff = (sequence - fence->sequence) & driver->sequence_mask; ++ if (diff <= driver->flush_diff) ++ break; ++ ++ fence->waiting_types = fence->type; ++ fc->waiting_types |= fence->type; ++ ++ if (driver->needed_flush) ++ fc->pending_flush |= driver->needed_flush(fence); ++ } ++ ++ if (driver->poll) ++ driver->poll(dev, fence_class, fc->waiting_types); ++ ++ call_flush = fc->pending_flush; ++ write_unlock_irqrestore(&fm->lock, irq_flags); ++ ++ if (call_flush && driver->flush) ++ driver->flush(dev, fence->fence_class); ++ ++ /* ++ * FIXME: Shold we implement a wait here for really old fences? 
++ */ ++ ++} ++EXPORT_SYMBOL(drm_fence_flush_old); ++ ++int drm_fence_object_wait(struct drm_fence_object *fence, ++ int lazy, int ignore_signals, uint32_t mask) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ int ret = 0; ++ unsigned long _end = 3 * DRM_HZ; ++ ++ if (mask & ~fence->type) { ++ DRM_ERROR("Wait trying to extend fence type" ++ " 0x%08x 0x%08x\n", mask, fence->type); ++ BUG(); ++ return -EINVAL; ++ } ++ ++ if (driver->wait) ++ return driver->wait(fence, lazy, !ignore_signals, mask); ++ ++ drm_fence_object_flush(fence, mask); ++ if (driver->has_irq(dev, fence->fence_class, mask)) { ++ if (!ignore_signals) ++ ret = wait_event_interruptible_timeout ++ (fc->fence_queue, ++ drm_fence_object_signaled(fence, mask), ++ 3 * DRM_HZ); ++ else ++ ret = wait_event_timeout ++ (fc->fence_queue, ++ drm_fence_object_signaled(fence, mask), ++ 3 * DRM_HZ); ++ ++ if (unlikely(ret == -ERESTARTSYS)) ++ return -EAGAIN; ++ ++ if (unlikely(ret == 0)) ++ return -EBUSY; ++ ++ return 0; ++ } ++ ++ return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask, ++ _end); ++} ++EXPORT_SYMBOL(drm_fence_object_wait); ++ ++int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags, ++ uint32_t fence_class, uint32_t type) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ unsigned long flags; ++ uint32_t sequence; ++ uint32_t native_types; ++ int ret; ++ ++ drm_fence_unring(dev, &fence->ring); ++ ret = driver->emit(dev, fence_class, fence_flags, &sequence, ++ &native_types); ++ if (ret) ++ return ret; ++ ++ write_lock_irqsave(&fm->lock, flags); ++ fence->fence_class = fence_class; ++ fence->type = type; 
++ fence->waiting_types = 0; ++ fence->signaled_types = 0; ++ fence->error = 0; ++ fence->sequence = sequence; ++ fence->native_types = native_types; ++ if (list_empty(&fc->ring)) ++ fc->highest_waiting_sequence = sequence - 1; ++ list_add_tail(&fence->ring, &fc->ring); ++ fc->latest_queued_sequence = sequence; ++ write_unlock_irqrestore(&fm->lock, flags); ++ return 0; ++} ++EXPORT_SYMBOL(drm_fence_object_emit); ++ ++static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class, ++ uint32_t type, ++ uint32_t fence_flags, ++ struct drm_fence_object *fence) ++{ ++ int ret = 0; ++ unsigned long flags; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ mutex_lock(&dev->struct_mutex); ++ atomic_set(&fence->usage, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ write_lock_irqsave(&fm->lock, flags); ++ INIT_LIST_HEAD(&fence->ring); ++ ++ /* ++ * Avoid hitting BUG() for kernel-only fence objects. ++ */ ++ ++ fence->fence_class = fence_class; ++ fence->type = type; ++ fence->signaled_types = 0; ++ fence->waiting_types = 0; ++ fence->sequence = 0; ++ fence->error = 0; ++ fence->dev = dev; ++ write_unlock_irqrestore(&fm->lock, flags); ++ if (fence_flags & DRM_FENCE_FLAG_EMIT) { ++ ret = drm_fence_object_emit(fence, fence_flags, ++ fence->fence_class, type); ++ } ++ return ret; ++} ++ ++int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class, ++ uint32_t type, unsigned flags, ++ struct drm_fence_object **c_fence) ++{ ++ struct drm_fence_object *fence; ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); ++ if (!fence) { ++ DRM_ERROR("Out of memory creating fence object\n"); ++ return -ENOMEM; ++ } ++ ret = drm_fence_object_init(dev, fence_class, type, flags, fence); ++ if (ret) { ++ drm_fence_usage_deref_unlocked(&fence); ++ return ret; ++ } ++ *c_fence = fence; ++ atomic_inc(&fm->count); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_fence_object_create); ++ ++void 
drm_fence_manager_init(struct drm_device *dev) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fence_class; ++ struct drm_fence_driver *fed = dev->driver->fence_driver; ++ int i; ++ unsigned long flags; ++ ++ rwlock_init(&fm->lock); ++ write_lock_irqsave(&fm->lock, flags); ++ fm->initialized = 0; ++ if (!fed) ++ goto out_unlock; ++ ++ fm->initialized = 1; ++ fm->num_classes = fed->num_classes; ++ BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES); ++ ++ for (i = 0; i < fm->num_classes; ++i) { ++ fence_class = &fm->fence_class[i]; ++ ++ memset(fence_class, 0, sizeof(*fence_class)); ++ INIT_LIST_HEAD(&fence_class->ring); ++ DRM_INIT_WAITQUEUE(&fence_class->fence_queue); ++ } ++ ++ atomic_set(&fm->count, 0); ++ out_unlock: ++ write_unlock_irqrestore(&fm->lock, flags); ++} ++ ++void drm_fence_manager_takedown(struct drm_device *dev) ++{ ++} ++ +diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c +index 3733e36..1c2ccd8 100644 +--- a/drivers/gpu/drm/drm_fops.c ++++ b/drivers/gpu/drm/drm_fops.c +@@ -474,6 +474,10 @@ int drm_release(struct inode *inode, struct file *filp) + } + mutex_unlock(&dev->ctxlist_mutex); + ++ ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ drm_fb_release(filp); ++ + mutex_lock(&dev->struct_mutex); + + if (file_priv->is_master) { +@@ -493,6 +497,7 @@ int drm_release(struct inode *inode, struct file *filp) + /* drop the reference held my the file priv */ + drm_master_put(&file_priv->master); + file_priv->is_master = 0; ++ + list_del(&file_priv->lhead); + mutex_unlock(&dev->struct_mutex); + +diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c +index 803bc9e..aa63663 100644 +--- a/drivers/gpu/drm/drm_memory.c ++++ b/drivers/gpu/drm/drm_memory.c +@@ -36,6 +36,112 @@ + #include + #include "drmP.h" + ++ ++static struct { ++ spinlock_t lock; ++ uint64_t cur_used; ++ uint64_t emer_used; ++ uint64_t low_threshold; ++ uint64_t high_threshold; ++ uint64_t emer_threshold; ++} drm_memctl 
= { ++ .lock = SPIN_LOCK_UNLOCKED ++}; ++ ++static inline size_t drm_size_align(size_t size) ++{ ++ size_t tmpSize = 4; ++ if (size > PAGE_SIZE) ++ return PAGE_ALIGN(size); ++ ++ while (tmpSize < size) ++ tmpSize <<= 1; ++ ++ return (size_t) tmpSize; ++} ++ ++int drm_alloc_memctl(size_t size) ++{ ++ int ret = 0; ++ unsigned long a_size = drm_size_align(size); ++ unsigned long new_used; ++ ++ spin_lock(&drm_memctl.lock); ++ new_used = drm_memctl.cur_used + a_size; ++ if (likely(new_used < drm_memctl.high_threshold)) { ++ drm_memctl.cur_used = new_used; ++ goto out; ++ } ++ ++ /* ++ * Allow small allocations from root-only processes to ++ * succeed until the emergency threshold is reached. ++ */ ++ ++ new_used += drm_memctl.emer_used; ++ if (unlikely(!DRM_SUSER(DRM_CURPROC) || ++ (a_size > 16*PAGE_SIZE) || ++ (new_used > drm_memctl.emer_threshold))) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ drm_memctl.cur_used = drm_memctl.high_threshold; ++ drm_memctl.emer_used = new_used - drm_memctl.high_threshold; ++out: ++ spin_unlock(&drm_memctl.lock); ++ return ret; ++} ++EXPORT_SYMBOL(drm_alloc_memctl); ++ ++ ++void drm_free_memctl(size_t size) ++{ ++ unsigned long a_size = drm_size_align(size); ++ ++ spin_lock(&drm_memctl.lock); ++ if (likely(a_size >= drm_memctl.emer_used)) { ++ a_size -= drm_memctl.emer_used; ++ drm_memctl.emer_used = 0; ++ } else { ++ drm_memctl.emer_used -= a_size; ++ a_size = 0; ++ } ++ drm_memctl.cur_used -= a_size; ++ spin_unlock(&drm_memctl.lock); ++} ++EXPORT_SYMBOL(drm_free_memctl); ++ ++void drm_query_memctl(uint64_t *cur_used, ++ uint64_t *emer_used, ++ uint64_t *low_threshold, ++ uint64_t *high_threshold, ++ uint64_t *emer_threshold) ++{ ++ spin_lock(&drm_memctl.lock); ++ *cur_used = drm_memctl.cur_used; ++ *emer_used = drm_memctl.emer_used; ++ *low_threshold = drm_memctl.low_threshold; ++ *high_threshold = drm_memctl.high_threshold; ++ *emer_threshold = drm_memctl.emer_threshold; ++ spin_unlock(&drm_memctl.lock); ++} 
++EXPORT_SYMBOL(drm_query_memctl); ++ ++void drm_init_memctl(size_t p_low_threshold, ++ size_t p_high_threshold, ++ size_t unit_size) ++{ ++ spin_lock(&drm_memctl.lock); ++ drm_memctl.emer_used = 0; ++ drm_memctl.cur_used = 0; ++ drm_memctl.low_threshold = p_low_threshold * unit_size; ++ drm_memctl.high_threshold = p_high_threshold * unit_size; ++ drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) + ++ drm_memctl.high_threshold; ++ spin_unlock(&drm_memctl.lock); ++} ++ + #ifdef DEBUG_MEMORY + #include "drm_memory_debug.h" + #else +@@ -77,6 +183,7 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) + } + return pt; + } ++EXPORT_SYMBOL(drm_realloc); + + #if __OS_HAS_AGP + static void *agp_remap(unsigned long offset, unsigned long size, +diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c +index 5ca132a..5b5c4c6 100644 +--- a/drivers/gpu/drm/drm_stub.c ++++ b/drivers/gpu/drm/drm_stub.c +@@ -101,6 +101,7 @@ struct drm_master *drm_master_create(struct drm_minor *minor) + drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER); + INIT_LIST_HEAD(&master->magicfree); + master->minor = minor; ++ master->lock.hw_lock = &minor->dev->default_lock; + + list_add_tail(&master->head, &minor->master_list); + +@@ -138,7 +139,7 @@ static void drm_master_destroy(struct kref *kref) + + drm_ht_remove(&master->magiclist); + +- if (master->lock.hw_lock) { ++ if (master->lock.hw_lock != &dev->default_lock) { + if (dev->sigdata.lock == master->lock.hw_lock) + dev->sigdata.lock = NULL; + master->lock.hw_lock = NULL; +@@ -201,6 +202,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, + init_timer(&dev->timer); + mutex_init(&dev->struct_mutex); + mutex_init(&dev->ctxlist_mutex); ++ mutex_init(&dev->bm.evict_mutex); + + idr_init(&dev->drw_idr); + +@@ -212,7 +214,12 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, + dev->hose = pdev->sysdata; + #endif + +- if (drm_ht_create(&dev->map_hash, 12)) 
{ ++ if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) { ++ return -ENOMEM; ++ } ++ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START, ++ DRM_FILE_PAGE_OFFSET_SIZE)) { ++ drm_ht_remove(&dev->map_hash); + return -ENOMEM; + } + +@@ -245,7 +252,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, + } + } + +- + retcode = drm_ctxbitmap_init(dev); + if (retcode) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); +@@ -261,6 +267,13 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, + } + } + ++ /* ++ * Set up default lock for DRI2, which doesn't need a lock. ++ * User space will override this in the legacy DRI case. ++ */ ++ dev->sigdata.lock = &dev->default_lock; ++ ++ drm_fence_manager_init(dev); + return 0; + + error_out_unreg: +@@ -459,3 +472,4 @@ int drm_put_minor(struct drm_minor **minor_p) + *minor_p = NULL; + return 0; + } ++EXPORT_SYMBOL(drm_put_minor); +diff --git a/drivers/gpu/drm/drm_ttm.c b/drivers/gpu/drm/drm_ttm.c +new file mode 100644 +index 0000000..0fe40e6 +--- /dev/null ++++ b/drivers/gpu/drm/drm_ttm.c +@@ -0,0 +1,473 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++#include ++ ++/** ++ * Allocates storage for pointers to the pages that back the ttm. ++ * ++ * Uses kmalloc if possible. Otherwise falls back to vmalloc. ++ */ ++static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm) ++{ ++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ++ ttm->pages = NULL; ++ ++ if (drm_alloc_memctl(size)) ++ return; ++ ++ if (size <= PAGE_SIZE) ++ ttm->pages = drm_calloc(1, size, DRM_MEM_TTM); ++ ++ if (!ttm->pages) { ++ ttm->pages = vmalloc_user(size); ++ if (ttm->pages) ++ ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC; ++ } ++ if (!ttm->pages) ++ drm_free_memctl(size); ++} ++ ++static void drm_ttm_free_page_directory(struct drm_ttm *ttm) ++{ ++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ++ ++ if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) { ++ vfree(ttm->pages); ++ ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC; ++ } else { ++ drm_free(ttm->pages, size, DRM_MEM_TTM); ++ } ++ drm_free_memctl(size); ++ ttm->pages = NULL; ++} ++ ++static struct page *drm_ttm_alloc_page(struct drm_ttm *ttm) ++{ ++ struct page *page; ++ ++ if (drm_alloc_memctl(PAGE_SIZE)) ++ return NULL; ++ ++ if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED) ++ page = drm_get_uncached_page(); ++ else ++ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); ++ ++ if (!page) { ++ 
drm_free_memctl(PAGE_SIZE); ++ return NULL; ++ } ++ return page; ++} ++ ++/* ++ * Change caching policy for the linear kernel map ++ * for range of pages in a ttm. ++ */ ++ ++static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached) ++{ ++ int i; ++ struct page **cur_page; ++ ++ if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED) ++ return 0; ++ ++ if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached) ++ return 0; ++ ++ if (noncached) ++ drm_clflush_pages(ttm->pages, ttm->num_pages); ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ cur_page = ttm->pages + i; ++ if (*cur_page) { ++ if (!PageHighMem(*cur_page)) { ++ if (noncached) { ++#ifdef CONFIG_X86 ++ set_memory_wc((unsigned long)page_address(*cur_page), 1); ++#else ++ map_page_into_agp(*cur_page); ++#endif ++ } else { ++ unmap_page_from_agp(*cur_page); ++ } ++ } ++ } ++ } ++ ++ DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED); ++ ++ return 0; ++} ++ ++ ++static void drm_ttm_free_user_pages(struct drm_ttm *ttm) ++{ ++ int write; ++ int dirty; ++ struct page *page; ++ int i; ++ ++ BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER)); ++ write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0); ++ dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0); ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ page = ttm->pages[i]; ++ if (page == NULL) ++ continue; ++ ++ if (page == ttm->dummy_read_page) { ++ BUG_ON(write); ++ continue; ++ } ++ ++ if (write && dirty && !PageReserved(page)) ++ set_page_dirty_lock(page); ++ ++ ttm->pages[i] = NULL; ++ put_page(page); ++ } ++} ++ ++static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm) ++{ ++ int i; ++ struct drm_buffer_manager *bm = &ttm->dev->bm; ++ struct page **cur_page; ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ cur_page = ttm->pages + i; ++ if (*cur_page) { ++ if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED) ++ drm_put_uncached_page(*cur_page); ++ else { ++ if (page_count(*cur_page) != 1) ++ DRM_ERROR("Erroneous 
page count. Leaking pages.\n"); ++ if (page_mapped(*cur_page)) ++ DRM_ERROR("Erroneous map count. Leaking page mappings.\n"); ++ __free_page(*cur_page); ++ } ++ drm_free_memctl(PAGE_SIZE); ++ --bm->cur_pages; ++ } ++ } ++} ++ ++/* ++ * Free all resources associated with a ttm. ++ */ ++ ++int drm_ttm_destroy(struct drm_ttm *ttm) ++{ ++ struct drm_ttm_backend *be; ++ ++ if (!ttm) ++ return 0; ++ ++ be = ttm->be; ++ if (be) { ++ be->func->destroy(be); ++ ttm->be = NULL; ++ } ++ ++ if (ttm->pages) { ++ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) ++ drm_ttm_set_caching(ttm, 0); ++ ++ if (ttm->page_flags & DRM_TTM_PAGE_USER) ++ drm_ttm_free_user_pages(ttm); ++ else ++ drm_ttm_free_alloced_pages(ttm); ++ ++ drm_ttm_free_page_directory(ttm); ++ } ++ ++ drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM); ++ return 0; ++} ++ ++struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index) ++{ ++ struct page *p; ++ struct drm_buffer_manager *bm = &ttm->dev->bm; ++ ++ while(NULL == (p = ttm->pages[index])) { ++ p = drm_ttm_alloc_page(ttm); ++ if (!p) ++ return NULL; ++ ++ if (PageHighMem(p)) ++ ttm->pages[--ttm->first_himem_page] = p; ++ else ++ ttm->pages[++ttm->last_lomem_page] = p; ++ ++ ++bm->cur_pages; ++ } ++ return p; ++} ++EXPORT_SYMBOL(drm_ttm_get_page); ++ ++/** ++ * drm_ttm_set_user: ++ * ++ * @ttm: the ttm to map pages to. This must always be ++ * a freshly created ttm. ++ * ++ * @tsk: a pointer to the address space from which to map ++ * pages. ++ * ++ * @write: a boolean indicating that write access is desired ++ * ++ * start: the starting address ++ * ++ * Map a range of user addresses to a new ttm object. This ++ * provides access to user memory from the graphics device. 
++ */ ++int drm_ttm_set_user(struct drm_ttm *ttm, ++ struct task_struct *tsk, ++ unsigned long start, ++ unsigned long num_pages) ++{ ++ struct mm_struct *mm = tsk->mm; ++ int ret; ++ int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0; ++ ++ BUG_ON(num_pages != ttm->num_pages); ++ BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0); ++ ++ down_read(&mm->mmap_sem); ++ ret = get_user_pages(tsk, mm, start, num_pages, ++ write, 0, ttm->pages, NULL); ++ up_read(&mm->mmap_sem); ++ ++ if (ret != num_pages && write) { ++ drm_ttm_free_user_pages(ttm); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++/** ++ * drm_ttm_populate: ++ * ++ * @ttm: the object to allocate pages for ++ * ++ * Allocate pages for all unset page entries, then ++ * call the backend to create the hardware mappings ++ */ ++int drm_ttm_populate(struct drm_ttm *ttm) ++{ ++ struct page *page; ++ unsigned long i; ++ struct drm_ttm_backend *be; ++ ++ if (ttm->state != ttm_unpopulated) ++ return 0; ++ ++ be = ttm->be; ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ page = drm_ttm_get_page(ttm, i); ++ if (!page) ++ return -ENOMEM; ++ } ++ ++ be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page); ++ ttm->state = ttm_unbound; ++ return 0; ++} ++ ++/** ++ * drm_ttm_create: ++ * ++ * @dev: the drm_device ++ * ++ * @size: The size (in bytes) of the desired object ++ * ++ * @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h. 
++ * ++ * Allocate and initialize a ttm, leaving it unpopulated at this time ++ */ ++ ++struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size, ++ uint32_t page_flags, struct page *dummy_read_page) ++{ ++ struct drm_bo_driver *bo_driver = dev->driver->bo_driver; ++ struct drm_ttm *ttm; ++ ++ if (!bo_driver) ++ return NULL; ++ ++ ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM); ++ if (!ttm) ++ return NULL; ++ ++ ttm->dev = dev; ++ atomic_set(&ttm->vma_count, 0); ++ ++ ttm->destroy = 0; ++ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ ttm->first_himem_page = ttm->num_pages; ++ ttm->last_lomem_page = -1; ++ ++ ttm->page_flags = page_flags; ++ ++ ttm->dummy_read_page = dummy_read_page; ++ ++ /* ++ * Account also for AGP module memory usage. ++ */ ++ ++ drm_ttm_alloc_page_directory(ttm); ++ if (!ttm->pages) { ++ drm_ttm_destroy(ttm); ++ DRM_ERROR("Failed allocating page table\n"); ++ return NULL; ++ } ++ ttm->be = bo_driver->create_ttm_backend_entry(dev); ++ if (!ttm->be) { ++ drm_ttm_destroy(ttm); ++ DRM_ERROR("Failed creating ttm backend entry\n"); ++ return NULL; ++ } ++ ttm->state = ttm_unpopulated; ++ return ttm; ++} ++ ++/** ++ * drm_ttm_evict: ++ * ++ * @ttm: the object to be unbound from the aperture. ++ * ++ * Transition a ttm from bound to evicted, where it ++ * isn't present in the aperture, but various caches may ++ * not be consistent. ++ */ ++void drm_ttm_evict(struct drm_ttm *ttm) ++{ ++ struct drm_ttm_backend *be = ttm->be; ++ int ret; ++ ++ if (ttm->state == ttm_bound) { ++ ret = be->func->unbind(be); ++ BUG_ON(ret); ++ } ++ ++ ttm->state = ttm_evicted; ++} ++ ++/** ++ * drm_ttm_fixup_caching: ++ * ++ * @ttm: the object to set unbound ++ * ++ * XXX this function is misnamed. Transition a ttm from evicted to ++ * unbound, flushing caches as appropriate. 
++ */ ++void drm_ttm_fixup_caching(struct drm_ttm *ttm) ++{ ++ ++ if (ttm->state == ttm_evicted) { ++ struct drm_ttm_backend *be = ttm->be; ++ if (be->func->needs_ub_cache_adjust(be)) ++ drm_ttm_set_caching(ttm, 0); ++ ttm->state = ttm_unbound; ++ } ++} ++ ++/** ++ * drm_ttm_unbind: ++ * ++ * @ttm: the object to unbind from the graphics device ++ * ++ * Unbind an object from the aperture. This removes the mappings ++ * from the graphics device and flushes caches if necessary. ++ */ ++void drm_ttm_unbind(struct drm_ttm *ttm) ++{ ++ if (ttm->state == ttm_bound) ++ drm_ttm_evict(ttm); ++ ++ drm_ttm_fixup_caching(ttm); ++} ++ ++/** ++ * drm_ttm_bind: ++ * ++ * @ttm: the ttm object to bind to the graphics device ++ * ++ * @bo_mem: the aperture memory region which will hold the object ++ * ++ * Bind a ttm object to the aperture. This ensures that the necessary ++ * pages are allocated, flushes CPU caches as needed and marks the ++ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been ++ * modified by the GPU ++ */ ++int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem) ++{ ++ struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver; ++ int ret = 0; ++ struct drm_ttm_backend *be; ++ ++ if (!ttm) ++ return -EINVAL; ++ if (ttm->state == ttm_bound) ++ return 0; ++ ++ be = ttm->be; ++ ++ ret = drm_ttm_populate(ttm); ++ if (ret) ++ return ret; ++ ++ if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) ++ drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); ++ else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) && ++ bo_driver->ttm_cache_flush) ++ bo_driver->ttm_cache_flush(ttm); ++ ++ ret = be->func->bind(be, bo_mem); ++ if (ret) { ++ ttm->state = ttm_evicted; ++ DRM_ERROR("Couldn't bind backend.\n"); ++ return ret; ++ } ++ ++ ttm->state = ttm_bound; ++ if (ttm->page_flags & DRM_TTM_PAGE_USER) ++ ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY; ++ return 0; ++} ++EXPORT_SYMBOL(drm_ttm_bind); +diff --git 
a/drivers/gpu/drm/drm_uncached.c b/drivers/gpu/drm/drm_uncached.c +new file mode 100644 +index 0000000..9c7183b +--- /dev/null ++++ b/drivers/gpu/drm/drm_uncached.c +@@ -0,0 +1,138 @@ ++/* ++ * Copyright (c) Red Hat Inc. ++ ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Dave Airlie ++ */ ++ ++/* simple list based uncached page allocator ++ * - Add chunks of 1MB to the allocator at a time. 
++ * - Use page->lru to keep a free list ++ * - doesn't track currently in use pages ++ * ++ * TODO: Add shrinker support ++ */ ++ ++#include "drmP.h" ++#include ++ ++static struct list_head uncached_free_list; ++ ++static struct mutex uncached_mutex; ++static int uncached_inited; ++static int total_uncached_pages; ++ ++/* add 1MB at a time */ ++#define NUM_PAGES_TO_ADD 256 ++ ++static void drm_uncached_page_put(struct page *page) ++{ ++ unmap_page_from_agp(page); ++ put_page(page); ++ __free_page(page); ++} ++ ++int drm_uncached_add_pages_locked(int num_pages) ++{ ++ struct page *page; ++ int i; ++ ++ DRM_DEBUG("adding uncached memory %ld\n", num_pages * PAGE_SIZE); ++ for (i = 0; i < num_pages; i++) { ++ ++ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); ++ if (!page) { ++ DRM_ERROR("unable to get page %d\n", i); ++ return i; ++ } ++ ++ get_page(page); ++#ifdef CONFIG_X86 ++ set_memory_wc((unsigned long)page_address(page), 1); ++#else ++ map_page_into_agp(page); ++#endif ++ ++ list_add(&page->lru, &uncached_free_list); ++ total_uncached_pages++; ++ } ++ return i; ++} ++ ++struct page *drm_get_uncached_page(void) ++{ ++ struct page *page = NULL; ++ int ret; ++ ++ mutex_lock(&uncached_mutex); ++ if (list_empty(&uncached_free_list)) { ++ ret = drm_uncached_add_pages_locked(NUM_PAGES_TO_ADD); ++ if (ret == 0) ++ return NULL; ++ } ++ ++ page = list_first_entry(&uncached_free_list, struct page, lru); ++ list_del(&page->lru); ++ ++ mutex_unlock(&uncached_mutex); ++ return page; ++} ++ ++void drm_put_uncached_page(struct page *page) ++{ ++ mutex_lock(&uncached_mutex); ++ list_add(&page->lru, &uncached_free_list); ++ mutex_unlock(&uncached_mutex); ++} ++ ++void drm_uncached_release_all_pages(void) ++{ ++ struct page *page, *tmp; ++ ++ list_for_each_entry_safe(page, tmp, &uncached_free_list, lru) { ++ list_del(&page->lru); ++ drm_uncached_page_put(page); ++ } ++} ++ ++int drm_uncached_init(void) ++{ ++ ++ if (uncached_inited) ++ return 0; ++ ++ 
INIT_LIST_HEAD(&uncached_free_list); ++ ++ mutex_init(&uncached_mutex); ++ uncached_inited = 1; ++ return 0; ++ ++} ++ ++void drm_uncached_fini(void) ++{ ++ if (!uncached_inited) ++ return; ++ ++ uncached_inited = 0; ++ drm_uncached_release_all_pages(); ++} ++ +diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c +index 3ffae02..bb012ad 100644 +--- a/drivers/gpu/drm/drm_vm.c ++++ b/drivers/gpu/drm/drm_vm.c +@@ -37,9 +37,15 @@ + #if defined(__ia64__) + #include + #endif ++#if defined(CONFIG_X86) ++#include ++#endif + + static void drm_vm_open(struct vm_area_struct *vma); + static void drm_vm_close(struct vm_area_struct *vma); ++static int drm_bo_mmap_locked(struct vm_area_struct *vma, ++ struct file *filp, ++ drm_local_map_t *map); + + static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) + { +@@ -49,6 +55,14 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) + if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { + pgprot_val(tmp) |= _PAGE_PCD; + pgprot_val(tmp) &= ~_PAGE_PWT; ++#if defined(CONFIG_X86_PAT) ++ /* if PAT is enabled and we are mapping a ++ TTM mapping */ ++ if (map_type == _DRM_TTM && pat_enabled) { ++ pgprot_val(tmp) &= ~_PAGE_PCD; ++ pgprot_val(tmp) |= _PAGE_PWT; ++ } ++#endif + } + #elif defined(__powerpc__) + pgprot_val(tmp) |= _PAGE_NO_CACHE; +@@ -270,6 +284,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) + case _DRM_GEM: + DRM_ERROR("tried to rmmap GEM object\n"); + break; ++ case _DRM_TTM: ++ BUG_ON(1); ++ break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + } +@@ -394,6 +411,8 @@ static struct vm_operations_struct drm_vm_sg_ops = { + .close = drm_vm_close, + }; + ++ ++ + /** + * \c open method for shared virtual memory. 
+ * +@@ -420,6 +439,7 @@ void drm_vm_open_locked(struct vm_area_struct *vma) + } + } + ++ + static void drm_vm_open(struct vm_area_struct *vma) + { + struct drm_file *priv = vma->vm_file->private_data; +@@ -650,6 +670,8 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) + vma->vm_flags |= VM_RESERVED; + vma->vm_page_prot = drm_dma_prot(map->type, vma); + break; ++ case _DRM_TTM: ++ return drm_bo_mmap_locked(vma, filp, map); + default: + return -EINVAL; /* This should never happen. */ + } +@@ -674,3 +696,169 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) + return ret; + } + EXPORT_SYMBOL(drm_mmap); ++ ++static int drm_bo_vm_fault(struct vm_area_struct *vma, ++ struct vm_fault *vmf) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ unsigned long page_offset; ++ struct page *page = NULL; ++ struct drm_ttm *ttm; ++ struct drm_device *dev; ++ unsigned long pfn; ++ int err; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ unsigned long ret = VM_FAULT_NOPAGE; ++ ++ dev = bo->dev; ++ err = mutex_lock_interruptible(&bo->mutex); ++ if (err) { ++ return VM_FAULT_NOPAGE; ++ } ++ ++ err = drm_bo_wait(bo, 0, 1, 0, 1); ++ if (err) { ++ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ goto out_unlock; ++ } ++ ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ /* ++ * If buffer happens to be in a non-mappable location, ++ * move it to a mappable. ++ */ ++ ++ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { ++ uint32_t new_flags = bo->mem.proposed_flags | ++ DRM_BO_FLAG_MAPPABLE | ++ DRM_BO_FLAG_FORCE_MAPPABLE; ++ err = drm_bo_move_buffer(bo, new_flags, 0, 0); ++ if (err) { ++ ret = (err != -EAGAIN) ? 
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; ++ goto out_unlock; ++ } ++ } ++ ++ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, ++ &bus_size); ++ ++ if (err) { ++ ret = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ ++ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT; ++ ++ if (bus_size) { ++ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; ++ ++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; ++ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); ++ } else { ++ ttm = bo->ttm; ++ ++ drm_ttm_fixup_caching(ttm); ++ page = drm_ttm_get_page(ttm, page_offset); ++ if (!page) { ++ ret = VM_FAULT_OOM; ++ goto out_unlock; ++ } ++ pfn = page_to_pfn(page); ++ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ? ++ vm_get_page_prot(vma->vm_flags) : ++ drm_io_prot(_DRM_TTM, vma); ++ } ++ ++ err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); ++ if (err) { ++ ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE; ++ goto out_unlock; ++ } ++out_unlock: ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++ mutex_unlock(&bo->mutex); ++ return ret; ++} ++ ++static void drm_bo_vm_open_locked(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ ++ /* clear the clean flags */ ++ bo->mem.flags &= ~DRM_BO_FLAG_CLEAN; ++ bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN; ++ ++ drm_vm_open_locked(vma); ++ atomic_inc(&bo->usage); ++} ++ ++/** ++ * \c vma open method for buffer objects. ++ * ++ * \param vma virtual memory area. ++ */ ++ ++static void drm_bo_vm_open(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ struct drm_device *dev = bo->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_bo_vm_open_locked(vma); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * \c vma close method for buffer objects. ++ * ++ * \param vma virtual memory area. 
++ */ ++ ++static void drm_bo_vm_close(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ struct drm_device *dev = bo->dev; ++ ++ drm_vm_close(vma); ++ if (bo) { ++ mutex_lock(&dev->struct_mutex); ++ drm_bo_usage_deref_locked((struct drm_buffer_object **) ++ &vma->vm_private_data); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ return; ++} ++ ++ ++static struct vm_operations_struct drm_bo_vm_ops = { ++ .fault = drm_bo_vm_fault, ++ .open = drm_bo_vm_open, ++ .close = drm_bo_vm_close, ++}; ++ ++ ++/** ++ * mmap buffer object memory. ++ * ++ * \param vma virtual memory area. ++ * \param file_priv DRM file private. ++ * \param map The buffer object drm map. ++ * \return zero on success or a negative number on failure. ++ */ ++ ++int drm_bo_mmap_locked(struct vm_area_struct *vma, ++ struct file *filp, ++ drm_local_map_t *map) ++{ ++ vma->vm_ops = &drm_bo_vm_ops; ++ vma->vm_private_data = map->handle; ++ vma->vm_file = filp; ++ vma->vm_flags |= VM_RESERVED | VM_IO; ++ vma->vm_flags |= VM_PFNMAP; ++ drm_bo_vm_open_locked(vma); ++ return 0; ++} +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +index 0dee776..b6eef19 100644 +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -177,6 +177,16 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + ++ master_priv->sarea = drm_getsarea(dev); ++ if (!master_priv->sarea) { ++ DRM_ERROR("can not find sarea!\n"); ++ i915_dma_cleanup(dev); ++ return -EINVAL; ++ } ++ ++ master_priv->sarea_priv = (drm_i915_sarea_t *) ++ ((u8 *) master_priv->sarea->handle + init->sarea_priv_offset); ++ + if (init->ring_size != 0) { + if (dev_priv->ring.ring_obj != NULL) { + i915_dma_cleanup(dev); +diff --git a/drivers/gpu/drm/i915/i915_irq.c 
b/drivers/gpu/drm/i915/i915_irq.c +index 0cadafb..c2049e7 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -361,7 +361,8 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) + return ret; + } + +-/* Needs the lock as it touches the ring. ++/* Needs the lock as it touches the ring, though if user space haven't ++ * set up a lock, we expect it to not touch the ring. + */ + int i915_irq_emit(struct drm_device *dev, void *data, + struct drm_file *file_priv) +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index 5689e44..e5cab0f 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -419,12 +419,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y) + + switch (pipe) { + case 0: +- master_priv->sarea_priv->pipeA_x = x; +- master_priv->sarea_priv->pipeA_y = y; ++ master_priv->sarea_priv->planeA_x = x; ++ master_priv->sarea_priv->planeA_y = y; + break; + case 1: +- master_priv->sarea_priv->pipeB_x = x; +- master_priv->sarea_priv->pipeB_y = y; ++ master_priv->sarea_priv->planeB_x = x; ++ master_priv->sarea_priv->planeB_y = y; + break; + default: + DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); +@@ -549,12 +549,12 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) + + switch (pipe) { + case 0: +- master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; +- master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; ++ master_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0; ++ master_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0; + break; + case 1: +- master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; +- master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; ++ master_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0; ++ master_priv->sarea_priv->planeB_h = enabled ? 
crtc->mode.vdisplay : 0; + break; + default: + DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); +diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile +index feb521e..5d6bc6c 100644 +--- a/drivers/gpu/drm/radeon/Makefile ++++ b/drivers/gpu/drm/radeon/Makefile +@@ -3,7 +3,11 @@ + # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. + + ccflags-y := -Iinclude/drm +-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o ++radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o \ ++ radeon_gem.o radeon_buffer.o radeon_fence.o radeon_cs.o \ ++ radeon_i2c.o radeon_fb.o radeon_encoders.o radeon_connectors.o radeon_display.o \ ++ atombios_crtc.o atom.o radeon_atombios.o radeon_combios.o radeon_legacy_crtc.o \ ++ radeon_legacy_encoders.o radeon_cursor.o radeon_pm.o radeon_gem_proc.o + + radeon-$(CONFIG_COMPAT) += radeon_ioc32.o + +diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h +new file mode 100644 +index 0000000..f1f18a4 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/ObjectID.h +@@ -0,0 +1,518 @@ ++/* ++* Copyright 2006-2007 Advanced Micro Devices, Inc. ++* ++* Permission is hereby granted, free of charge, to any person obtaining a ++* copy of this software and associated documentation files (the "Software"), ++* to deal in the Software without restriction, including without limitation ++* the rights to use, copy, modify, merge, publish, distribute, sublicense, ++* and/or sell copies of the Software, and to permit persons to whom the ++* Software is furnished to do so, subject to the following conditions: ++* ++* The above copyright notice and this permission notice shall be included in ++* all copies or substantial portions of the Software. 
++* ++* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++* OTHER DEALINGS IN THE SOFTWARE. ++*/ ++/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */ ++ ++#ifndef _OBJECTID_H ++#define _OBJECTID_H ++ ++#if defined(_X86_) ++#pragma pack(1) ++#endif ++ ++/****************************************************/ ++/* Graphics Object Type Definition */ ++/****************************************************/ ++#define GRAPH_OBJECT_TYPE_NONE 0x0 ++#define GRAPH_OBJECT_TYPE_GPU 0x1 ++#define GRAPH_OBJECT_TYPE_ENCODER 0x2 ++#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3 ++#define GRAPH_OBJECT_TYPE_ROUTER 0x4 ++/* deleted */ ++ ++/****************************************************/ ++/* Encoder Object ID Definition */ ++/****************************************************/ ++#define ENCODER_OBJECT_ID_NONE 0x00 ++ ++/* Radeon Class Display Hardware */ ++#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01 ++#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02 ++#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03 ++#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04 ++#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */ ++#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06 ++#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07 ++ ++/* External Third Party Encoders */ ++#define ENCODER_OBJECT_ID_SI170B 0x08 ++#define ENCODER_OBJECT_ID_CH7303 0x09 ++#define ENCODER_OBJECT_ID_CH7301 0x0A ++#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */ ++#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C ++#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D ++#define 
ENCODER_OBJECT_ID_TITFP513 0x0E ++#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */ ++#define ENCODER_OBJECT_ID_VT1623 0x10 ++#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 ++#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 ++/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */ ++#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 ++#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 ++#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15 ++#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */ ++#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */ ++#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */ ++#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19 ++#define ENCODER_OBJECT_ID_VT1625 0x1A ++#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B ++#define ENCODER_OBJECT_ID_DP_AN9801 0x1C ++#define ENCODER_OBJECT_ID_DP_DP501 0x1D ++#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY 0x1E ++#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA 0x1F ++#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20 ++#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21 ++ ++#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF ++ ++/****************************************************/ ++/* Connector Object ID Definition */ ++/****************************************************/ ++#define CONNECTOR_OBJECT_ID_NONE 0x00 ++#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01 ++#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02 ++#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03 ++#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D 0x04 ++#define CONNECTOR_OBJECT_ID_VGA 0x05 ++#define CONNECTOR_OBJECT_ID_COMPOSITE 0x06 ++#define CONNECTOR_OBJECT_ID_SVIDEO 0x07 ++#define CONNECTOR_OBJECT_ID_YPbPr 0x08 ++#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09 ++#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */ ++#define CONNECTOR_OBJECT_ID_SCART 0x0B ++#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C ++#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D 
++#define CONNECTOR_OBJECT_ID_LVDS 0x0E ++#define CONNECTOR_OBJECT_ID_7PIN_DIN 0x0F ++#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR 0x10 ++#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11 ++#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12 ++#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 ++ ++/* deleted */ ++ ++/****************************************************/ ++/* Router Object ID Definition */ ++/****************************************************/ ++#define ROUTER_OBJECT_ID_NONE 0x00 ++#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01 ++ ++/****************************************************/ ++// Graphics Object ENUM ID Definition */ ++/****************************************************/ ++#define GRAPH_OBJECT_ENUM_ID1 0x01 ++#define GRAPH_OBJECT_ENUM_ID2 0x02 ++#define GRAPH_OBJECT_ENUM_ID3 0x03 ++#define GRAPH_OBJECT_ENUM_ID4 0x04 ++#define GRAPH_OBJECT_ENUM_ID5 0x05 ++#define GRAPH_OBJECT_ENUM_ID6 0x06 ++ ++/****************************************************/ ++/* Graphics Object ID Bit definition */ ++/****************************************************/ ++#define OBJECT_ID_MASK 0x00FF ++#define ENUM_ID_MASK 0x0700 ++#define RESERVED1_ID_MASK 0x0800 ++#define OBJECT_TYPE_MASK 0x7000 ++#define RESERVED2_ID_MASK 0x8000 ++ ++#define OBJECT_ID_SHIFT 0x00 ++#define ENUM_ID_SHIFT 0x08 ++#define OBJECT_TYPE_SHIFT 0x0C ++ ++ ++/****************************************************/ ++/* Graphics Object family definition */ ++/****************************************************/ ++#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \ ++ GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT) ++/****************************************************/ ++/* GPU Object ID definition - Shared with BIOS */ ++/****************************************************/ ++#define GPU_ENUM_ID1 ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT) ++ 
++/****************************************************/ ++/* Encoder Object ID definition - Shared with BIOS */ ++/****************************************************/ ++/* ++#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101 ++#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102 ++#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103 ++#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104 ++#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105 ++#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106 ++#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107 ++#define ENCODER_SIL170B_ENUM_ID1 0x2108 ++#define ENCODER_CH7303_ENUM_ID1 0x2109 ++#define ENCODER_CH7301_ENUM_ID1 0x210A ++#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B ++#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 0x210C ++#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 0x210D ++#define ENCODER_TITFP513_ENUM_ID1 0x210E ++#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 0x210F ++#define ENCODER_VT1623_ENUM_ID1 0x2110 ++#define ENCODER_HDMI_SI1930_ENUM_ID1 0x2111 ++#define ENCODER_HDMI_INTERNAL_ENUM_ID1 0x2112 ++#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113 ++#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114 ++#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115 ++#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116 ++#define ENCODER_SI178_ENUM_ID1 0x2117 ++#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118 ++#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119 ++#define ENCODER_VT1625_ENUM_ID1 0x211A ++#define ENCODER_HDMI_SI1932_ENUM_ID1 0x211B ++#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1 0x211C ++#define ENCODER_DP_DP501_ENUM_ID1 0x211D ++#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E ++*/ ++#define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT) ++ 
++#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT) ++ ++#define ENCODER_SIL170B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT) ++ ++#define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << 
OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) ++ ++#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) ++ ++ ++#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT) ++ ++ ++#define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) ++ ++ ++#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) ++ ++ ++#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << 
OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT ++ ++#define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) ++ ++#define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ 
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) ++ ++#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) ++ ++/****************************************************/ ++/* Connector Object ID definition - Shared with BIOS */ ++/****************************************************/ ++/* ++#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 0x3101 ++#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 0x3102 ++#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 0x3103 ++#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 0x3104 ++#define CONNECTOR_VGA_ENUM_ID1 0x3105 ++#define CONNECTOR_COMPOSITE_ENUM_ID1 
0x3106 ++#define CONNECTOR_SVIDEO_ENUM_ID1 0x3107 ++#define CONNECTOR_YPbPr_ENUM_ID1 0x3108 ++#define CONNECTOR_D_CONNECTORE_ENUM_ID1 0x3109 ++#define CONNECTOR_9PIN_DIN_ENUM_ID1 0x310A ++#define CONNECTOR_SCART_ENUM_ID1 0x310B ++#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 0x310C ++#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 0x310D ++#define CONNECTOR_LVDS_ENUM_ID1 0x310E ++#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F ++#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110 ++*/ ++#define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT 
|\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << 
OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) ++ ++ ++#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_DISPLAYPORT << 
OBJECT_ID_SHIFT) ++ ++#define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ ++ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) ++ ++/****************************************************/ ++/* Router Object ID definition - Shared with BIOS */ ++/****************************************************/ ++#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\ ++ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ++ ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT) ++ ++/* deleted */ ++ ++/****************************************************/ ++/* Object Cap definition - Shared with BIOS */ ++/****************************************************/ ++#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L ++#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L ++ ++ ++#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01 ++#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02 ++#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03 ++ ++#if defined(_X86_) ++#pragma pack() ++#endif ++ ++#endif /*GRAPHICTYPE */ ++ ++ ++ ++ +diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h +new file mode 100644 +index 0000000..f94d2e2 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/atom-bits.h +@@ -0,0 +1,48 @@ ++/* ++ * Copyright 2008 Advanced Micro Devices, Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Author: Stanislaw Skowronek ++ */ ++ ++#ifndef ATOM_BITS_H ++#define ATOM_BITS_H ++ ++static inline uint8_t get_u8(void *bios, int ptr) ++{ ++ return ((unsigned char *)bios)[ptr]; ++} ++#define U8(ptr) get_u8(ctx->ctx->bios,(ptr)) ++#define CU8(ptr) get_u8(ctx->bios,(ptr)) ++static inline uint16_t get_u16(void *bios, int ptr) ++{ ++ return get_u8(bios,ptr)|(((uint16_t)get_u8(bios,ptr+1))<<8); ++} ++#define U16(ptr) get_u16(ctx->ctx->bios,(ptr)) ++#define CU16(ptr) get_u16(ctx->bios,(ptr)) ++static inline uint32_t get_u32(void *bios, int ptr) ++{ ++ return get_u16(bios,ptr)|(((uint32_t)get_u16(bios,ptr+2))<<16); ++} ++#define U32(ptr) get_u32(ctx->ctx->bios,(ptr)) ++#define CU32(ptr) get_u32(ctx->bios,(ptr)) ++#define CSTR(ptr) (((char *)(ctx->bios))+(ptr)) ++ ++#endif +diff --git a/drivers/gpu/drm/radeon/atom-names.h b/drivers/gpu/drm/radeon/atom-names.h +new file mode 100644 +index 0000000..2cdc170 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/atom-names.h +@@ -0,0 +1,100 @@ ++/* ++ * Copyright 2008 Advanced Micro Devices, Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Author: Stanislaw Skowronek ++ */ ++ ++#ifndef ATOM_NAMES_H ++#define ATOM_NAMES_H ++ ++#include "atom.h" ++ ++#ifdef ATOM_DEBUG ++ ++#define ATOM_OP_NAMES_CNT 123 ++static char *atom_op_names[ATOM_OP_NAMES_CNT]={ ++"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL", ++"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC", ++"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG", ++"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL", ++"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS", ++"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG", ++"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS", ++"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS", ++"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB", ++"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT", ++"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS", ++"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH", ++"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL", ++"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS", ++"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC", ++"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB", ++"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS", ++"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG", ++"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB", ++"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL", ++"SHL_MC", "SHR_REG", "SHR_PS", 
"SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC", ++"DEBUG", "CTB_DS", ++}; ++ ++#define ATOM_TABLE_NAMES_CNT 74 ++static char *atom_table_names[ATOM_TABLE_NAMES_CNT]={ ++"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit", ++"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit", ++"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl", ++"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock", ++"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice", ++"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController", ++"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange", ++"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl", ++"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl", ++"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl", ++"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl", ++"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock", ++"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing", ++"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source", ++"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters", ++"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock", ++"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection", ++"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp", ++"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C", ++"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection", ++"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion", ++"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining", ++"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl", ++"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource", ++"MemoryDeviceInit", "EnableYUV", ++}; ++ ++#define ATOM_IO_NAMES_CNT 5 ++static char *atom_io_names[ATOM_IO_NAMES_CNT]={ ++"MM", "PLL", "MC", "PCIE", "PCIE PORT", ++}; ++ ++#else ++ ++#define ATOM_OP_NAMES_CNT 0 ++#define 
ATOM_TABLE_NAMES_CNT 0 ++#define ATOM_IO_NAMES_CNT 0 ++ ++#endif ++ ++#endif +diff --git a/drivers/gpu/drm/radeon/atom-types.h b/drivers/gpu/drm/radeon/atom-types.h +new file mode 100644 +index 0000000..1125b86 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/atom-types.h +@@ -0,0 +1,42 @@ ++/* ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Author: Dave Airlie ++ */ ++ ++#ifndef ATOM_TYPES_H ++#define ATOM_TYPES_H ++ ++/* sync atom types to kernel types */ ++ ++typedef uint16_t USHORT; ++typedef uint32_t ULONG; ++typedef uint8_t UCHAR; ++ ++ ++#ifndef ATOM_BIG_ENDIAN ++#if defined(__BIG_ENDIAN) ++#define ATOM_BIG_ENDIAN 1 ++#else ++#define ATOM_BIG_ENDIAN 0 ++#endif ++#endif ++#endif +diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c +new file mode 100644 +index 0000000..1154791 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/atom.c +@@ -0,0 +1,1141 @@ ++/* ++ * Copyright 2008 Advanced Micro Devices, Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Author: Stanislaw Skowronek ++ */ ++ ++#include ++#include ++ ++#define ATOM_DEBUG ++ ++#include "atom.h" ++#include "atom-names.h" ++#include "atom-bits.h" ++ ++#define ATOM_COND_ABOVE 0 ++#define ATOM_COND_ABOVEOREQUAL 1 ++#define ATOM_COND_ALWAYS 2 ++#define ATOM_COND_BELOW 3 ++#define ATOM_COND_BELOWOREQUAL 4 ++#define ATOM_COND_EQUAL 5 ++#define ATOM_COND_NOTEQUAL 6 ++ ++#define ATOM_PORT_ATI 0 ++#define ATOM_PORT_PCI 1 ++#define ATOM_PORT_SYSIO 2 ++ ++#define ATOM_UNIT_MICROSEC 0 ++#define ATOM_UNIT_MILLISEC 1 ++ ++#define PLL_INDEX 2 ++#define PLL_DATA 3 ++ ++typedef struct { ++ struct atom_context *ctx; ++ ++ uint32_t *ps, *ws; ++ int ps_shift; ++ uint16_t start; ++} atom_exec_context; ++ ++int atom_debug = 0; ++void atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); ++ ++static uint32_t atom_arg_mask[8] = {0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, 0xFF000000}; ++static int atom_arg_shift[8] = {0, 0, 8, 16, 0, 8, 16, 24}; ++static int atom_dst_to_src[8][4] = { // translate destination alignment field to the source alignment encoding ++ { 0, 0, 0, 0 }, ++ { 1, 2, 3, 0 }, ++ { 1, 2, 3, 0 }, ++ { 1, 2, 3, 0 }, ++ { 4, 5, 6, 7 }, ++ { 4, 5, 6, 7 }, ++ { 4, 5, 6, 7 }, ++ { 4, 5, 6, 7 }, ++}; ++static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 }; ++ ++static int debug_depth = 0; ++#ifdef ATOM_DEBUG ++static void debug_print_spaces(int n) ++{ ++ while(n--) ++ printk(" "); ++} ++#define DEBUG(...) do if(atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while(0) ++#define SDEBUG(...) do if(atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while(0) ++#else ++#define DEBUG(...) do { } while(0) ++#define SDEBUG(...) 
do { } while(0) ++#endif ++ ++static uint32_t atom_iio_execute(struct atom_context *ctx, int base, uint32_t index, uint32_t data) ++{ ++ uint32_t temp = 0xCDCDCDCD; ++ while(1) ++ switch(CU8(base)) { ++ case ATOM_IIO_NOP: ++ base++; ++ break; ++ case ATOM_IIO_READ: ++ temp = ctx->card->reg_read(ctx->card, CU16(base+1)); ++ base+=3; ++ break; ++ case ATOM_IIO_WRITE: ++ ctx->card->reg_write(ctx->card, CU16(base+1), temp); ++ base+=3; ++ break; ++ case ATOM_IIO_CLEAR: ++ temp &= ~((0xFFFFFFFF >> (32-CU8(base+1))) << CU8(base+2)); ++ base+=3; ++ break; ++ case ATOM_IIO_SET: ++ temp |= (0xFFFFFFFF >> (32-CU8(base+1))) << CU8(base+2); ++ base+=3; ++ break; ++ case ATOM_IIO_MOVE_INDEX: ++ temp &= ~((0xFFFFFFFF >> (32-CU8(base+1))) << CU8(base+2)); ++ temp |= ((index >> CU8(base+2)) & (0xFFFFFFFF >> (32-CU8(base+1)))) << CU8(base+3); ++ base+=4; ++ break; ++ case ATOM_IIO_MOVE_DATA: ++ temp &= ~((0xFFFFFFFF >> (32-CU8(base+1))) << CU8(base+2)); ++ temp |= ((data >> CU8(base+2)) & (0xFFFFFFFF >> (32-CU8(base+1)))) << CU8(base+3); ++ base+=4; ++ break; ++ case ATOM_IIO_MOVE_ATTR: ++ temp &= ~((0xFFFFFFFF >> (32-CU8(base+1))) << CU8(base+2)); ++ temp |= ((ctx->io_attr >> CU8(base+2)) & (0xFFFFFFFF >> (32-CU8(base+1)))) << CU8(base+3); ++ base+=4; ++ break; ++ case ATOM_IIO_END: ++ return temp; ++ default: ++ printk(KERN_INFO "Unknown IIO opcode.\n"); ++ return 0; ++ } ++} ++ ++static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr, uint32_t *saved, int print) ++{ ++ uint32_t idx, val = 0xCDCDCDCD, align, arg; ++ struct atom_context *gctx = ctx->ctx; ++ arg = attr & 7; ++ align = (attr >> 3) & 7; ++ switch(arg) { ++ case ATOM_ARG_REG: ++ idx = U16(*ptr); ++ (*ptr)+=2; ++ if(print) ++ DEBUG("REG[0x%04X]", idx); ++ idx += gctx->reg_block; ++ switch(gctx->io_mode) { ++ case ATOM_IO_MM: ++ val = gctx->card->reg_read(gctx->card, idx); ++ break; ++ case ATOM_IO_PCI: ++ printk(KERN_INFO "PCI registers are not implemented.\n"); ++ return 0; ++ case 
ATOM_IO_SYSIO: ++ printk(KERN_INFO "SYSIO registers are not implemented.\n"); ++ return 0; ++ default: ++ if(!(gctx->io_mode&0x80)) { ++ printk(KERN_INFO "Bad IO mode.\n"); ++ return 0; ++ } ++ if(!gctx->iio[gctx->io_mode&0x7F]) { ++ printk(KERN_INFO "Undefined indirect IO read method %d.\n", gctx->io_mode&0x7F); ++ return 0; ++ } ++ val = atom_iio_execute(gctx, gctx->iio[gctx->io_mode&0x7F], idx, 0); ++ } ++ break; ++ case ATOM_ARG_PS: ++ idx = U8(*ptr); ++ (*ptr)++; ++ val = le32_to_cpu(ctx->ps[idx]); ++ if(print) ++ DEBUG("PS[0x%02X,0x%04X]", idx, val); ++ break; ++ case ATOM_ARG_WS: ++ idx = U8(*ptr); ++ (*ptr)++; ++ if(print) ++ DEBUG("WS[0x%02X]", idx); ++ switch(idx) { ++ case ATOM_WS_QUOTIENT: ++ val = gctx->divmul[0]; ++ break; ++ case ATOM_WS_REMAINDER: ++ val = gctx->divmul[1]; ++ break; ++ case ATOM_WS_DATAPTR: ++ val = gctx->data_block; ++ break; ++ case ATOM_WS_SHIFT: ++ val = gctx->shift; ++ break; ++ case ATOM_WS_OR_MASK: ++ val = 1<shift; ++ break; ++ case ATOM_WS_AND_MASK: ++ val = ~(1<shift); ++ break; ++ case ATOM_WS_FB_WINDOW: ++ val = gctx->fb_base; ++ break; ++ case ATOM_WS_ATTRIBUTES: ++ val = gctx->io_attr; ++ break; ++ default: ++ val = ctx->ws[idx]; ++ } ++ break; ++ case ATOM_ARG_ID: ++ idx = U16(*ptr); ++ (*ptr)+=2; ++ if(print) { ++ if(gctx->data_block) ++ DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block); ++ else ++ DEBUG("ID[0x%04X]", idx); ++ } ++ val = U32(idx + gctx->data_block); ++ break; ++ case ATOM_ARG_FB: ++ idx = U8(*ptr); ++ (*ptr)++; ++ if(print) ++ DEBUG("FB[0x%02X]", idx); ++ printk(KERN_INFO "FB access is not implemented.\n"); ++ return 0; ++ case ATOM_ARG_IMM: ++ switch(align) { ++ case ATOM_SRC_DWORD: ++ val = U32(*ptr); ++ (*ptr)+=4; ++ if(print) ++ DEBUG("IMM 0x%08X\n", val); ++ return val; ++ case ATOM_SRC_WORD0: ++ case ATOM_SRC_WORD8: ++ case ATOM_SRC_WORD16: ++ val = U16(*ptr); ++ (*ptr)+=2; ++ if(print) ++ DEBUG("IMM 0x%04X\n", val); ++ return val; ++ case ATOM_SRC_BYTE0: ++ case ATOM_SRC_BYTE8: ++ case 
ATOM_SRC_BYTE16: ++ case ATOM_SRC_BYTE24: ++ val = U8(*ptr); ++ (*ptr)++; ++ if(print) ++ DEBUG("IMM 0x%02X\n", val); ++ return val; ++ } ++ return 0; ++ case ATOM_ARG_PLL: ++ idx = U8(*ptr); ++ (*ptr)++; ++ if(print) ++ DEBUG("PLL[0x%02X]", idx); ++ val = gctx->card->pll_read(gctx->card, idx); ++ break; ++ case ATOM_ARG_MC: ++ idx = U8(*ptr); ++ (*ptr)++; ++ if(print) ++ DEBUG("MC[0x%02X]", idx); ++ val = gctx->card->mc_read(gctx->card, idx); ++ break; ++ } ++ if(saved) ++ *saved = val; ++ val &= atom_arg_mask[align]; ++ val >>= atom_arg_shift[align]; ++ if(print) ++ switch(align) { ++ case ATOM_SRC_DWORD: ++ DEBUG(".[31:0] -> 0x%08X\n", val); ++ break; ++ case ATOM_SRC_WORD0: ++ DEBUG(".[15:0] -> 0x%04X\n", val); ++ break; ++ case ATOM_SRC_WORD8: ++ DEBUG(".[23:8] -> 0x%04X\n", val); ++ break; ++ case ATOM_SRC_WORD16: ++ DEBUG(".[31:16] -> 0x%04X\n", val); ++ break; ++ case ATOM_SRC_BYTE0: ++ DEBUG(".[7:0] -> 0x%02X\n", val); ++ break; ++ case ATOM_SRC_BYTE8: ++ DEBUG(".[15:8] -> 0x%02X\n", val); ++ break; ++ case ATOM_SRC_BYTE16: ++ DEBUG(".[23:16] -> 0x%02X\n", val); ++ break; ++ case ATOM_SRC_BYTE24: ++ DEBUG(".[31:24] -> 0x%02X\n", val); ++ break; ++ } ++ return val; ++} ++ ++static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr) ++{ ++ uint32_t align = (attr >> 3) & 7, arg = attr & 7; ++ switch(arg) { ++ case ATOM_ARG_REG: ++ case ATOM_ARG_ID: ++ (*ptr)+=2; ++ break; ++ case ATOM_ARG_PLL: ++ case ATOM_ARG_MC: ++ case ATOM_ARG_PS: ++ case ATOM_ARG_WS: ++ case ATOM_ARG_FB: ++ (*ptr)++; ++ break; ++ case ATOM_ARG_IMM: ++ switch(align) { ++ case ATOM_SRC_DWORD: ++ (*ptr)+=4; ++ return; ++ case ATOM_SRC_WORD0: ++ case ATOM_SRC_WORD8: ++ case ATOM_SRC_WORD16: ++ (*ptr)+=2; ++ return; ++ case ATOM_SRC_BYTE0: ++ case ATOM_SRC_BYTE8: ++ case ATOM_SRC_BYTE16: ++ case ATOM_SRC_BYTE24: ++ (*ptr)++; ++ return; ++ } ++ return; ++ } ++} ++ ++static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) ++{ ++ return 
atom_get_src_int(ctx, attr, ptr, NULL, 1); ++} ++ ++static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr, uint32_t *saved, int print) ++{ ++ return atom_get_src_int(ctx, arg|atom_dst_to_src[(attr>>3)&7][(attr>>6)&3]<<3, ptr, saved, print); ++} ++ ++static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr) ++{ ++ atom_skip_src_int(ctx, arg|atom_dst_to_src[(attr>>3)&7][(attr>>6)&3]<<3, ptr); ++} ++ ++static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr, uint32_t val, uint32_t saved) ++{ ++ uint32_t align = atom_dst_to_src[(attr>>3)&7][(attr>>6)&3], old_val = val, idx; ++ struct atom_context *gctx = ctx->ctx; ++ old_val &= atom_arg_mask[align] >> atom_arg_shift[align]; ++ val <<= atom_arg_shift[align]; ++ val &= atom_arg_mask[align]; ++ saved &= ~atom_arg_mask[align]; ++ val |= saved; ++ switch(arg) { ++ case ATOM_ARG_REG: ++ idx = U16(*ptr); ++ (*ptr)+=2; ++ DEBUG("REG[0x%04X]", idx); ++ idx += gctx->reg_block; ++ switch(gctx->io_mode) { ++ case ATOM_IO_MM: ++ if(idx == 0) ++ gctx->card->reg_write(gctx->card, idx, val<<2); ++ else ++ gctx->card->reg_write(gctx->card, idx, val); ++ break; ++ case ATOM_IO_PCI: ++ printk(KERN_INFO "PCI registers are not implemented.\n"); ++ return; ++ case ATOM_IO_SYSIO: ++ printk(KERN_INFO "SYSIO registers are not implemented.\n"); ++ return; ++ default: ++ if(!(gctx->io_mode&0x80)) { ++ printk(KERN_INFO "Bad IO mode.\n"); ++ return; ++ } ++ if(!gctx->iio[gctx->io_mode&0xFF]) { ++ printk(KERN_INFO "Undefined indirect IO write method %d.\n", gctx->io_mode&0x7F); ++ return; ++ } ++ atom_iio_execute(gctx, gctx->iio[gctx->io_mode&0xFF], idx, val); ++ } ++ break; ++ case ATOM_ARG_PS: ++ idx = U8(*ptr); ++ (*ptr)++; ++ DEBUG("PS[0x%02X]", idx); ++ ctx->ps[idx] = cpu_to_le32(val); ++ break; ++ case ATOM_ARG_WS: ++ idx = U8(*ptr); ++ (*ptr)++; ++ DEBUG("WS[0x%02X]", idx); ++ switch(idx) { ++ case ATOM_WS_QUOTIENT: ++ gctx->divmul[0] = val; ++ break; ++ case 
ATOM_WS_REMAINDER: ++ gctx->divmul[1] = val; ++ break; ++ case ATOM_WS_DATAPTR: ++ gctx->data_block = val; ++ break; ++ case ATOM_WS_SHIFT: ++ gctx->shift = val; ++ break; ++ case ATOM_WS_OR_MASK: ++ case ATOM_WS_AND_MASK: ++ break; ++ case ATOM_WS_FB_WINDOW: ++ gctx->fb_base = val; ++ break; ++ case ATOM_WS_ATTRIBUTES: ++ gctx->io_attr = val; ++ break; ++ default: ++ ctx->ws[idx] = val; ++ } ++ break; ++ case ATOM_ARG_FB: ++ idx = U8(*ptr); ++ (*ptr)++; ++ DEBUG("FB[0x%02X]", idx); ++ printk(KERN_INFO "FB access is not implemented.\n"); ++ return; ++ case ATOM_ARG_PLL: ++ idx = U8(*ptr); ++ (*ptr)++; ++ DEBUG("PLL[0x%02X]", idx); ++ gctx->card->pll_write(gctx->card, idx, val); ++ break; ++ case ATOM_ARG_MC: ++ idx = U8(*ptr); ++ (*ptr)++; ++ DEBUG("MC[0x%02X]", idx); ++ gctx->card->mc_write(gctx->card, idx, val); ++ return; ++ } ++ switch(align) { ++ case ATOM_SRC_DWORD: ++ DEBUG(".[31:0] <- 0x%08X\n", old_val); ++ break; ++ case ATOM_SRC_WORD0: ++ DEBUG(".[15:0] <- 0x%04X\n", old_val); ++ break; ++ case ATOM_SRC_WORD8: ++ DEBUG(".[23:8] <- 0x%04X\n", old_val); ++ break; ++ case ATOM_SRC_WORD16: ++ DEBUG(".[31:16] <- 0x%04X\n", old_val); ++ break; ++ case ATOM_SRC_BYTE0: ++ DEBUG(".[7:0] <- 0x%02X\n", old_val); ++ break; ++ case ATOM_SRC_BYTE8: ++ DEBUG(".[15:8] <- 0x%02X\n", old_val); ++ break; ++ case ATOM_SRC_BYTE16: ++ DEBUG(".[23:16] <- 0x%02X\n", old_val); ++ break; ++ case ATOM_SRC_BYTE24: ++ DEBUG(".[31:24] <- 0x%02X\n", old_val); ++ break; ++ } ++} ++ ++static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src, saved; ++ int dptr = *ptr; ++ SDEBUG(" dst: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); ++ SDEBUG(" src: "); ++ src = atom_get_src(ctx, attr, ptr); ++ dst += src; ++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved); ++} ++ ++static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src, 
saved; ++ int dptr = *ptr; ++ SDEBUG(" dst: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); ++ SDEBUG(" src: "); ++ src = atom_get_src(ctx, attr, ptr); ++ dst &= src; ++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved); ++} ++ ++static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ printk("ATOM BIOS beeped!\n"); ++} ++ ++static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ int idx = U8((*ptr)++); ++ if(idx < ATOM_TABLE_NAMES_CNT) ++ SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); ++ else ++ SDEBUG(" table: %d\n", idx); ++ if(U16(ctx->ctx->cmd_table + 4 + 2*idx)) ++ atom_execute_table(ctx->ctx, idx, ctx->ps+ctx->ps_shift); ++} ++ ++static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t saved; ++ int dptr = *ptr; ++ attr &= 0x38; ++ attr |= atom_def_dst[attr>>3]<<6; ++ atom_get_dst(ctx, arg, attr, ptr, &saved, 0); ++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, 0, saved); ++} ++ ++static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src; ++ SDEBUG(" src1: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); ++ SDEBUG(" src2: "); ++ src = atom_get_src(ctx, attr, ptr); ++ ctx->ctx->cs_equal = (dst == src); ++ ctx->ctx->cs_above = (dst > src); ++ SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal?"EQ":"NE", ctx->ctx->cs_above?"GT":"LE"); ++} ++ ++static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t count = U8((*ptr)++); ++ SDEBUG(" count: %d\n", count); ++ if(arg == ATOM_UNIT_MICROSEC) ++ schedule_timeout_uninterruptible(usecs_to_jiffies(count)); ++ else ++ schedule_timeout_uninterruptible(msecs_to_jiffies(count)); ++} ++ ++static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src; ++ SDEBUG(" src1: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, 
NULL, 1); ++ SDEBUG(" src2: "); ++ src = atom_get_src(ctx, attr, ptr); ++ if(src != 0) { ++ ctx->ctx->divmul[0] = dst/src; ++ ctx->ctx->divmul[1] = dst%src; ++ } else { ++ ctx->ctx->divmul[0] = 0; ++ ctx->ctx->divmul[1] = 0; ++ } ++} ++ ++static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ /* functionally, a nop */ ++} ++ ++static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ int execute = 0, target = U16(*ptr); ++ (*ptr)+=2; ++ switch(arg) { ++ case ATOM_COND_ABOVE: ++ execute = ctx->ctx->cs_above; ++ break; ++ case ATOM_COND_ABOVEOREQUAL: ++ execute = ctx->ctx->cs_above || ctx->ctx->cs_equal; ++ break; ++ case ATOM_COND_ALWAYS: ++ execute = 1; ++ break; ++ case ATOM_COND_BELOW: ++ execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal); ++ break; ++ case ATOM_COND_BELOWOREQUAL: ++ execute = !ctx->ctx->cs_above; ++ break; ++ case ATOM_COND_EQUAL: ++ execute = ctx->ctx->cs_equal; ++ break; ++ case ATOM_COND_NOTEQUAL: ++ execute = !ctx->ctx->cs_equal; ++ break; ++ } ++ if(arg != ATOM_COND_ALWAYS) ++ SDEBUG(" taken: %s\n", execute?"yes":"no"); ++ SDEBUG(" target: 0x%04X\n", target); ++ if(execute) ++ *ptr = ctx->start+target; ++} ++ ++static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src1, src2, saved; ++ int dptr = *ptr; ++ SDEBUG(" dst: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); ++ SDEBUG(" src1: "); ++ src1 = atom_get_src(ctx, attr, ptr); ++ SDEBUG(" src2: "); ++ src2 = atom_get_src(ctx, attr, ptr); ++ dst &= src1; ++ dst |= src2; ++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved); ++} ++ ++static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t src, saved; ++ int dptr = *ptr; ++ if(((attr>>3)&7) != ATOM_SRC_DWORD) ++ atom_get_dst(ctx, arg, attr, ptr, &saved, 0); ++ else { ++ atom_skip_dst(ctx, arg, attr, ptr); ++ saved = 0xCDCDCDCD; ++ } ++ SDEBUG(" src: 
"); ++ src = atom_get_src(ctx, attr, ptr); ++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, src, saved); ++} ++ ++static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src; ++ SDEBUG(" src1: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); ++ SDEBUG(" src2: "); ++ src = atom_get_src(ctx, attr, ptr); ++ ctx->ctx->divmul[0] = dst*src; ++} ++ ++static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ /* nothing */ ++} ++ ++static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src, saved; ++ int dptr = *ptr; ++ SDEBUG(" dst: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); ++ SDEBUG(" src: "); ++ src = atom_get_src(ctx, attr, ptr); ++ dst |= src; ++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved); ++} ++ ++static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t val = U8((*ptr)++); ++ SDEBUG("POST card output: 0x%02X\n", val); ++} ++ ++static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ printk(KERN_INFO "unimplemented!\n"); ++} ++ ++static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ printk(KERN_INFO "unimplemented!\n"); ++} ++ ++static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ printk(KERN_INFO "unimplemented!\n"); ++} ++ ++static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ int idx = U8(*ptr); ++ (*ptr)++; ++ SDEBUG(" block: %d\n", idx); ++ if(!idx) ++ ctx->ctx->data_block = 0; ++ else if(idx==255) ++ ctx->ctx->data_block = ctx->start; ++ else ++ ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2*idx); ++ SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block); ++} ++ ++static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ SDEBUG(" fb_base: "); ++ ctx->ctx->fb_base = 
atom_get_src(ctx, attr, ptr); ++} ++ ++static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ int port; ++ switch(arg) { ++ case ATOM_PORT_ATI: ++ port = U16(*ptr); ++ if(port < ATOM_IO_NAMES_CNT) ++ SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]); ++ else ++ SDEBUG(" port: %d\n", port); ++ if(!port) ++ ctx->ctx->io_mode = ATOM_IO_MM; ++ else ++ ctx->ctx->io_mode = ATOM_IO_IIO|port; ++ (*ptr)+=2; ++ break; ++ case ATOM_PORT_PCI: ++ ctx->ctx->io_mode = ATOM_IO_PCI; ++ (*ptr)++; ++ break; ++ case ATOM_PORT_SYSIO: ++ ctx->ctx->io_mode = ATOM_IO_SYSIO; ++ (*ptr)++; ++ break; ++ } ++} ++ ++static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ ctx->ctx->reg_block = U16(*ptr); ++ (*ptr)+=2; ++ SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); ++} ++ ++static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++), shift; ++ uint32_t saved, dst; ++ int dptr = *ptr; ++ attr &= 0x38; ++ attr |= atom_def_dst[attr>>3]<<6; ++ SDEBUG(" dst: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); ++ shift = U8((*ptr)++); ++ SDEBUG(" shift: %d\n", shift); ++ dst <<= shift; ++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved); ++} ++ ++static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++), shift; ++ uint32_t saved, dst; ++ int dptr = *ptr; ++ attr &= 0x38; ++ attr |= atom_def_dst[attr>>3]<<6; ++ SDEBUG(" dst: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); ++ shift = U8((*ptr)++); ++ SDEBUG(" shift: %d\n", shift); ++ dst >>= shift; ++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved); ++} ++ ++static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src, saved; ++ int dptr = *ptr; ++ SDEBUG(" dst: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); ++ SDEBUG(" src: "); ++ src = atom_get_src(ctx, attr, ptr); ++ dst -= src; 
++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved); ++} ++ ++static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t src, val, target; ++ SDEBUG(" switch: "); ++ src = atom_get_src(ctx, attr, ptr); ++ while(U16(*ptr) != ATOM_CASE_END) ++ if(U8(*ptr) == ATOM_CASE_MAGIC) { ++ (*ptr)++; ++ SDEBUG(" case: "); ++ val = atom_get_src(ctx, (attr&0x38)|ATOM_ARG_IMM, ptr); ++ target = U16(*ptr); ++ if(val == src) { ++ SDEBUG(" target: %04X\n", target); ++ *ptr = ctx->start+target; ++ return; ++ } ++ (*ptr) += 2; ++ } else { ++ printk(KERN_INFO "Bad case.\n"); ++ return; ++ } ++ (*ptr) += 2; ++} ++ ++static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src; ++ SDEBUG(" src1: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); ++ SDEBUG(" src2: "); ++ src = atom_get_src(ctx, attr, ptr); ++ ctx->ctx->cs_equal = ((dst & src) == 0); ++ SDEBUG(" result: %s\n", ctx->ctx->cs_equal?"EQ":"NE"); ++} ++ ++static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ uint8_t attr = U8((*ptr)++); ++ uint32_t dst, src, saved; ++ int dptr = *ptr; ++ SDEBUG(" dst: "); ++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); ++ SDEBUG(" src: "); ++ src = atom_get_src(ctx, attr, ptr); ++ dst ^= src; ++ SDEBUG(" dst: "); ++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved); ++} ++ ++static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) ++{ ++ printk(KERN_INFO "unimplemented!\n"); ++} ++ ++static struct { ++ void (*func)(atom_exec_context *, int *, int); ++ int arg; ++} opcode_table[ATOM_OP_CNT] = { ++ { NULL, 0 }, ++ { atom_op_move, ATOM_ARG_REG }, ++ { atom_op_move, ATOM_ARG_PS }, ++ { atom_op_move, ATOM_ARG_WS }, ++ { atom_op_move, ATOM_ARG_FB }, ++ { atom_op_move, ATOM_ARG_PLL }, ++ { atom_op_move, ATOM_ARG_MC }, ++ { atom_op_and, ATOM_ARG_REG }, ++ { atom_op_and, ATOM_ARG_PS }, ++ { atom_op_and, ATOM_ARG_WS }, ++ { 
atom_op_and, ATOM_ARG_FB }, ++ { atom_op_and, ATOM_ARG_PLL }, ++ { atom_op_and, ATOM_ARG_MC }, ++ { atom_op_or, ATOM_ARG_REG }, ++ { atom_op_or, ATOM_ARG_PS }, ++ { atom_op_or, ATOM_ARG_WS }, ++ { atom_op_or, ATOM_ARG_FB }, ++ { atom_op_or, ATOM_ARG_PLL }, ++ { atom_op_or, ATOM_ARG_MC }, ++ { atom_op_shl, ATOM_ARG_REG }, ++ { atom_op_shl, ATOM_ARG_PS }, ++ { atom_op_shl, ATOM_ARG_WS }, ++ { atom_op_shl, ATOM_ARG_FB }, ++ { atom_op_shl, ATOM_ARG_PLL }, ++ { atom_op_shl, ATOM_ARG_MC }, ++ { atom_op_shr, ATOM_ARG_REG }, ++ { atom_op_shr, ATOM_ARG_PS }, ++ { atom_op_shr, ATOM_ARG_WS }, ++ { atom_op_shr, ATOM_ARG_FB }, ++ { atom_op_shr, ATOM_ARG_PLL }, ++ { atom_op_shr, ATOM_ARG_MC }, ++ { atom_op_mul, ATOM_ARG_REG }, ++ { atom_op_mul, ATOM_ARG_PS }, ++ { atom_op_mul, ATOM_ARG_WS }, ++ { atom_op_mul, ATOM_ARG_FB }, ++ { atom_op_mul, ATOM_ARG_PLL }, ++ { atom_op_mul, ATOM_ARG_MC }, ++ { atom_op_div, ATOM_ARG_REG }, ++ { atom_op_div, ATOM_ARG_PS }, ++ { atom_op_div, ATOM_ARG_WS }, ++ { atom_op_div, ATOM_ARG_FB }, ++ { atom_op_div, ATOM_ARG_PLL }, ++ { atom_op_div, ATOM_ARG_MC }, ++ { atom_op_add, ATOM_ARG_REG }, ++ { atom_op_add, ATOM_ARG_PS }, ++ { atom_op_add, ATOM_ARG_WS }, ++ { atom_op_add, ATOM_ARG_FB }, ++ { atom_op_add, ATOM_ARG_PLL }, ++ { atom_op_add, ATOM_ARG_MC }, ++ { atom_op_sub, ATOM_ARG_REG }, ++ { atom_op_sub, ATOM_ARG_PS }, ++ { atom_op_sub, ATOM_ARG_WS }, ++ { atom_op_sub, ATOM_ARG_FB }, ++ { atom_op_sub, ATOM_ARG_PLL }, ++ { atom_op_sub, ATOM_ARG_MC }, ++ { atom_op_setport, ATOM_PORT_ATI }, ++ { atom_op_setport, ATOM_PORT_PCI }, ++ { atom_op_setport, ATOM_PORT_SYSIO }, ++ { atom_op_setregblock, 0 }, ++ { atom_op_setfbbase, 0 }, ++ { atom_op_compare, ATOM_ARG_REG }, ++ { atom_op_compare, ATOM_ARG_PS }, ++ { atom_op_compare, ATOM_ARG_WS }, ++ { atom_op_compare, ATOM_ARG_FB }, ++ { atom_op_compare, ATOM_ARG_PLL }, ++ { atom_op_compare, ATOM_ARG_MC }, ++ { atom_op_switch, 0 }, ++ { atom_op_jump, ATOM_COND_ALWAYS }, ++ { atom_op_jump, ATOM_COND_EQUAL }, ++ { 
atom_op_jump, ATOM_COND_BELOW }, ++ { atom_op_jump, ATOM_COND_ABOVE }, ++ { atom_op_jump, ATOM_COND_BELOWOREQUAL }, ++ { atom_op_jump, ATOM_COND_ABOVEOREQUAL }, ++ { atom_op_jump, ATOM_COND_NOTEQUAL }, ++ { atom_op_test, ATOM_ARG_REG }, ++ { atom_op_test, ATOM_ARG_PS }, ++ { atom_op_test, ATOM_ARG_WS }, ++ { atom_op_test, ATOM_ARG_FB }, ++ { atom_op_test, ATOM_ARG_PLL }, ++ { atom_op_test, ATOM_ARG_MC }, ++ { atom_op_delay, ATOM_UNIT_MILLISEC }, ++ { atom_op_delay, ATOM_UNIT_MICROSEC }, ++ { atom_op_calltable, 0 }, ++ { atom_op_repeat, 0 }, ++ { atom_op_clear, ATOM_ARG_REG }, ++ { atom_op_clear, ATOM_ARG_PS }, ++ { atom_op_clear, ATOM_ARG_WS }, ++ { atom_op_clear, ATOM_ARG_FB }, ++ { atom_op_clear, ATOM_ARG_PLL }, ++ { atom_op_clear, ATOM_ARG_MC }, ++ { atom_op_nop, 0 }, ++ { atom_op_eot, 0 }, ++ { atom_op_mask, ATOM_ARG_REG }, ++ { atom_op_mask, ATOM_ARG_PS }, ++ { atom_op_mask, ATOM_ARG_WS }, ++ { atom_op_mask, ATOM_ARG_FB }, ++ { atom_op_mask, ATOM_ARG_PLL }, ++ { atom_op_mask, ATOM_ARG_MC }, ++ { atom_op_postcard, 0 }, ++ { atom_op_beep, 0 }, ++ { atom_op_savereg, 0 }, ++ { atom_op_restorereg, 0 }, ++ { atom_op_setdatablock, 0 }, ++ { atom_op_xor, ATOM_ARG_REG }, ++ { atom_op_xor, ATOM_ARG_PS }, ++ { atom_op_xor, ATOM_ARG_WS }, ++ { atom_op_xor, ATOM_ARG_FB }, ++ { atom_op_xor, ATOM_ARG_PLL }, ++ { atom_op_xor, ATOM_ARG_MC }, ++ { atom_op_shl, ATOM_ARG_REG }, ++ { atom_op_shl, ATOM_ARG_PS }, ++ { atom_op_shl, ATOM_ARG_WS }, ++ { atom_op_shl, ATOM_ARG_FB }, ++ { atom_op_shl, ATOM_ARG_PLL }, ++ { atom_op_shl, ATOM_ARG_MC }, ++ { atom_op_shr, ATOM_ARG_REG }, ++ { atom_op_shr, ATOM_ARG_PS }, ++ { atom_op_shr, ATOM_ARG_WS }, ++ { atom_op_shr, ATOM_ARG_FB }, ++ { atom_op_shr, ATOM_ARG_PLL }, ++ { atom_op_shr, ATOM_ARG_MC }, ++ { atom_op_debug, 0 }, ++}; ++ ++void atom_execute_table(struct atom_context *ctx, int index, uint32_t *params) ++{ ++ int base = CU16(ctx->cmd_table+4+2*index); ++ int len, ws, ps, ptr; ++ unsigned char op; ++ atom_exec_context ectx; ++ ++ 
if(!base) ++ return; ++ ++ len = CU16(base+ATOM_CT_SIZE_PTR); ++ ws = CU8(base+ATOM_CT_WS_PTR); ++ ps = CU8(base+ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK; ++ ptr = base+ATOM_CT_CODE_PTR; ++ ++ SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); ++ ++ /* reset reg block */ ++ ctx->reg_block = 0; ++ ectx.ctx = ctx; ++ ectx.ps_shift = ps/4; ++ ectx.start = base; ++ ectx.ps = params; ++ if(ws) ++ ectx.ws = kzalloc(4*ws, GFP_KERNEL); ++ else ++ ectx.ws = NULL; ++ ++ debug_depth++; ++ while(1) { ++ op = CU8(ptr++); ++ if(op0) ++ opcode_table[op].func(&ectx, &ptr, opcode_table[op].arg); ++ else ++ break; ++ ++ if(op == ATOM_OP_EOT) ++ break; ++ } ++ debug_depth--; ++ SDEBUG("<<\n"); ++ ++ if(ws) ++ kfree(ectx.ws); ++} ++ ++static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; ++static void atom_index_iio(struct atom_context *ctx, int base) ++{ ++ ctx->iio = kzalloc(2*256, GFP_KERNEL); ++ while(CU8(base) == ATOM_IIO_START) { ++ ctx->iio[CU8(base+1)] = base+2; ++ base += 2; ++ while(CU8(base) != ATOM_IIO_END) ++ base += atom_iio_len[CU8(base)]; ++ base += 3; ++ } ++} ++ ++struct atom_context *atom_parse(struct card_info *card, void *bios) ++{ ++ int base; ++ struct atom_context *ctx = kzalloc(sizeof(struct atom_context), GFP_KERNEL); ++ char *str; ++ ++ ctx->card = card; ++ ctx->bios = bios; ++ ++ if(CU16(0) != ATOM_BIOS_MAGIC) { ++ printk(KERN_INFO "Invalid BIOS magic.\n"); ++ kfree(ctx); ++ return NULL; ++ } ++ if(strncmp(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC, strlen(ATOM_ATI_MAGIC))) { ++ printk(KERN_INFO "Invalid ATI magic.\n"); ++ kfree(ctx); ++ return NULL; ++ } ++ ++ base = CU16(ATOM_ROM_TABLE_PTR); ++ if(strncmp(CSTR(base+ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC, strlen(ATOM_ROM_MAGIC))) { ++ printk(KERN_INFO "Invalid ATOM magic.\n"); ++ kfree(ctx); ++ return NULL; ++ } ++ ++ ctx->cmd_table = CU16(base+ATOM_ROM_CMD_PTR); ++ ctx->data_table = CU16(base+ATOM_ROM_DATA_PTR); ++ atom_index_iio(ctx, CU16(ctx->data_table+ATOM_DATA_IIO_PTR)+4); ++ ++ str = 
CSTR(CU16(base+ATOM_ROM_MSG_PTR)); ++ while(*str && ((*str == '\n') || (*str == '\r'))) ++ str++; ++ printk(KERN_INFO "ATOM BIOS: %s", str); ++ ++ return ctx; ++} ++ ++int atom_asic_init(struct atom_context *ctx) ++{ ++ int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); ++ uint32_t ps[16]; ++ memset(ps, 0, 64); ++ ++ ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); ++ ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR)); ++ if(!ps[0] || !ps[1]) ++ return 1; ++ ++ if(!CU16(ctx->cmd_table+4+2*ATOM_CMD_INIT)) ++ return 1; ++ atom_execute_table(ctx, ATOM_CMD_INIT, ps); ++ ++ return 0; ++} ++ ++void atom_destroy(struct atom_context *ctx) ++{ ++ if(ctx->iio) ++ kfree(ctx->iio); ++ kfree(ctx); ++} ++ ++ ++void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start) ++{ ++ int offset = index * 2 + 4; ++ int idx = CU16(ctx->data_table + offset); ++ ++ if (size) ++ *size = CU16(idx); ++ if (frev) ++ *frev = CU8(idx + 2); ++ if (crev) ++ *crev = CU8(idx + 3); ++ *data_start = idx; ++ return; ++} ++ ++void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev) ++{ ++ int offset = index * 2 + 4; ++ int idx = CU16(ctx->cmd_table + offset); ++ ++ if (frev) ++ *frev = CU8(idx + 2); ++ if (crev) ++ *crev = CU8(idx + 3); ++ return; ++} +diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h +new file mode 100644 +index 0000000..289de33 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/atom.h +@@ -0,0 +1,150 @@ ++/* ++ * Copyright 2008 Advanced Micro Devices, Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Author: Stanislaw Skowronek ++ */ ++ ++#ifndef ATOM_H ++#define ATOM_H ++ ++#include ++#include "drmP.h" ++ ++#define ATOM_BIOS_MAGIC 0xAA55 ++#define ATOM_ATI_MAGIC_PTR 0x30 ++#define ATOM_ATI_MAGIC " 761295520" ++#define ATOM_ROM_TABLE_PTR 0x48 ++ ++#define ATOM_ROM_MAGIC "ATOM" ++#define ATOM_ROM_MAGIC_PTR 4 ++ ++#define ATOM_ROM_MSG_PTR 0x10 ++#define ATOM_ROM_CMD_PTR 0x1E ++#define ATOM_ROM_DATA_PTR 0x20 ++ ++#define ATOM_CMD_INIT 0 ++#define ATOM_CMD_SETSCLK 0x0A ++#define ATOM_CMD_SETMCLK 0x0B ++#define ATOM_CMD_SETPCLK 0x0C ++ ++#define ATOM_DATA_FWI_PTR 0xC ++#define ATOM_DATA_IIO_PTR 0x32 ++ ++#define ATOM_FWI_DEFSCLK_PTR 8 ++#define ATOM_FWI_DEFMCLK_PTR 0xC ++#define ATOM_FWI_MAXSCLK_PTR 0x24 ++#define ATOM_FWI_MAXMCLK_PTR 0x28 ++ ++#define ATOM_CT_SIZE_PTR 0 ++#define ATOM_CT_WS_PTR 4 ++#define ATOM_CT_PS_PTR 5 ++#define ATOM_CT_PS_MASK 0x7F ++#define ATOM_CT_CODE_PTR 6 ++ ++#define ATOM_OP_CNT 123 ++#define ATOM_OP_EOT 91 ++ ++#define ATOM_CASE_MAGIC 0x63 ++#define ATOM_CASE_END 0x5A5A ++ ++#define ATOM_ARG_REG 0 ++#define ATOM_ARG_PS 1 ++#define ATOM_ARG_WS 2 ++#define ATOM_ARG_FB 3 ++#define ATOM_ARG_ID 4 ++#define ATOM_ARG_IMM 5 ++#define ATOM_ARG_PLL 6 ++#define ATOM_ARG_MC 7 ++ ++#define ATOM_SRC_DWORD 0 ++#define ATOM_SRC_WORD0 1 ++#define ATOM_SRC_WORD8 2 ++#define ATOM_SRC_WORD16 3 ++#define ATOM_SRC_BYTE0 4 ++#define ATOM_SRC_BYTE8 5 ++#define ATOM_SRC_BYTE16 6 ++#define ATOM_SRC_BYTE24 7 ++ ++#define ATOM_WS_QUOTIENT 0x40 ++#define ATOM_WS_REMAINDER 0x41 ++#define ATOM_WS_DATAPTR 0x42 ++#define ATOM_WS_SHIFT 0x43 ++#define ATOM_WS_OR_MASK 0x44 ++#define ATOM_WS_AND_MASK 0x45 ++#define ATOM_WS_FB_WINDOW 0x46 ++#define ATOM_WS_ATTRIBUTES 0x47 ++ ++#define ATOM_IIO_NOP 0 ++#define ATOM_IIO_START 1 ++#define ATOM_IIO_READ 2 ++#define ATOM_IIO_WRITE 3 ++#define ATOM_IIO_CLEAR 4 ++#define ATOM_IIO_SET 5 ++#define ATOM_IIO_MOVE_INDEX 6 ++#define ATOM_IIO_MOVE_ATTR 7 ++#define ATOM_IIO_MOVE_DATA 8 ++#define ATOM_IIO_END 9 ++ ++#define 
ATOM_IO_MM 0 ++#define ATOM_IO_PCI 1 ++#define ATOM_IO_SYSIO 2 ++#define ATOM_IO_IIO 0x80 ++ ++struct card_info { ++ struct drm_device *dev; ++ void (* reg_write)(struct card_info *, uint32_t, uint32_t); // filled by driver ++ uint32_t (* reg_read)(struct card_info *, uint32_t); // filled by driver ++ void (* mc_write)(struct card_info *, uint32_t, uint32_t); // filled by driver ++ uint32_t (* mc_read)(struct card_info *, uint32_t); // filled by driver ++ void (* pll_write)(struct card_info *, uint32_t, uint32_t); // filled by driver ++ uint32_t (* pll_read)(struct card_info *, uint32_t); // filled by driver ++// int (* read_rom)(struct card_info *, uint8_t *); // filled by driver ++}; ++ ++struct atom_context { ++ struct card_info *card; ++ void *bios; ++ uint32_t cmd_table, data_table; ++ uint16_t *iio; ++ ++ uint16_t data_block; ++ uint32_t fb_base; ++ uint32_t divmul[2]; ++ uint16_t io_attr; ++ uint16_t reg_block; ++ uint8_t shift; ++ int cs_equal, cs_above; ++ int io_mode; ++}; ++ ++extern int atom_debug; ++ ++struct atom_context *atom_parse(struct card_info *, void *); ++void atom_execute_table(struct atom_context *, int, uint32_t *); ++int atom_asic_init(struct atom_context *); ++void atom_destroy(struct atom_context *); ++void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); ++void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); ++#include "atom-types.h" ++#include "atombios.h" ++#include "ObjectID.h" ++ ++#endif +diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h +new file mode 100644 +index 0000000..9932b09 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/atombios.h +@@ -0,0 +1,5025 @@ ++/* ++ * Copyright 2006-2007 Advanced Micro Devices, Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++ ++/****************************************************************************/ ++/*Portion I: Definitions shared between VBIOS and Driver */ ++/****************************************************************************/ ++ ++ ++#ifndef _ATOMBIOS_H ++#define _ATOMBIOS_H ++ ++#define ATOM_VERSION_MAJOR 0x00020000 ++#define ATOM_VERSION_MINOR 0x00000002 ++ ++#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR) ++ ++/* Endianness should be specified before inclusion, ++ * default to little endian ++ */ ++#ifndef ATOM_BIG_ENDIAN ++#error Endian not specified ++#endif ++ ++#ifdef _H2INC ++ #ifndef ULONG ++ typedef unsigned long ULONG; ++ #endif ++ ++ #ifndef UCHAR ++ typedef unsigned char UCHAR; ++ #endif ++ ++ #ifndef USHORT ++ typedef unsigned short USHORT; ++ #endif ++#endif ++ ++#define ATOM_DAC_A 0 ++#define ATOM_DAC_B 1 ++#define ATOM_EXT_DAC 2 ++ ++#define ATOM_CRTC1 0 ++#define ATOM_CRTC2 1 ++ ++#define ATOM_DIGA 0 ++#define ATOM_DIGB 1 ++ ++#define ATOM_PPLL1 0 ++#define ATOM_PPLL2 1 ++ ++#define ATOM_SCALER1 0 ++#define ATOM_SCALER2 1 ++ ++#define ATOM_SCALER_DISABLE 0 ++#define ATOM_SCALER_CENTER 1 ++#define ATOM_SCALER_EXPANSION 2 ++#define ATOM_SCALER_MULTI_EX 3 ++ ++#define ATOM_DISABLE 0 ++#define ATOM_ENABLE 1 ++#define ATOM_LCD_BLOFF (ATOM_DISABLE+2) ++#define ATOM_LCD_BLON (ATOM_ENABLE+2) ++#define ATOM_LCD_BL_BRIGHTNESS_CONTROL (ATOM_ENABLE+3) ++#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5) ++#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5) ++#define ATOM_ENCODER_INIT (ATOM_DISABLE+7) ++ ++#define ATOM_BLANKING 1 ++#define ATOM_BLANKING_OFF 0 ++ ++#define ATOM_CURSOR1 0 ++#define ATOM_CURSOR2 1 ++ ++#define ATOM_ICON1 0 ++#define ATOM_ICON2 1 ++ ++#define ATOM_CRT1 0 ++#define ATOM_CRT2 1 ++ ++#define ATOM_TV_NTSC 1 ++#define ATOM_TV_NTSCJ 2 ++#define ATOM_TV_PAL 3 ++#define ATOM_TV_PALM 4 ++#define ATOM_TV_PALCN 5 ++#define ATOM_TV_PALN 6 ++#define ATOM_TV_PAL60 7 ++#define ATOM_TV_SECAM 8 ++#define ATOM_TV_CV 16 ++ 
++#define ATOM_DAC1_PS2 1 ++#define ATOM_DAC1_CV 2 ++#define ATOM_DAC1_NTSC 3 ++#define ATOM_DAC1_PAL 4 ++ ++#define ATOM_DAC2_PS2 ATOM_DAC1_PS2 ++#define ATOM_DAC2_CV ATOM_DAC1_CV ++#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC ++#define ATOM_DAC2_PAL ATOM_DAC1_PAL ++ ++#define ATOM_PM_ON 0 ++#define ATOM_PM_STANDBY 1 ++#define ATOM_PM_SUSPEND 2 ++#define ATOM_PM_OFF 3 ++ ++/* Bit0:{=0:single, =1:dual}, ++ Bit1 {=0:666RGB, =1:888RGB}, ++ Bit2:3:{Grey level} ++ Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}*/ ++ ++#define ATOM_PANEL_MISC_DUAL 0x00000001 ++#define ATOM_PANEL_MISC_888RGB 0x00000002 ++#define ATOM_PANEL_MISC_GREY_LEVEL 0x0000000C ++#define ATOM_PANEL_MISC_FPDI 0x00000010 ++#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT 2 ++#define ATOM_PANEL_MISC_SPATIAL 0x00000020 ++#define ATOM_PANEL_MISC_TEMPORAL 0x00000040 ++#define ATOM_PANEL_MISC_API_ENABLED 0x00000080 ++ ++ ++#define MEMTYPE_DDR1 "DDR1" ++#define MEMTYPE_DDR2 "DDR2" ++#define MEMTYPE_DDR3 "DDR3" ++#define MEMTYPE_DDR4 "DDR4" ++ ++#define ASIC_BUS_TYPE_PCI "PCI" ++#define ASIC_BUS_TYPE_AGP "AGP" ++#define ASIC_BUS_TYPE_PCIE "PCI_EXPRESS" ++ ++/* Maximum size of that FireGL flag string */ ++ ++#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support ++#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING ) ++ ++#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop ++#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING ++ ++#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support ++#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING ) ++ ++#define HW_ASSISTED_I2C_STATUS_FAILURE 2 ++#define HW_ASSISTED_I2C_STATUS_SUCCESS 1 ++ ++#pragma pack(1) /* BIOS data must use byte aligment */ ++ ++/* Define offset to location of ROM header. 
*/ ++ ++#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER 0x00000048L ++#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L ++ ++#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94 ++#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */ ++#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f ++#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e ++ ++/* Common header for all ROM Data tables. ++ Every table pointed _ATOM_MASTER_DATA_TABLE has this common header. ++ And the pointer actually points to this header. */ ++ ++typedef struct _ATOM_COMMON_TABLE_HEADER ++{ ++ USHORT usStructureSize; ++ UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */ ++ UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */ ++ /*Image can't be updated, while Driver needs to carry the new table! */ ++}ATOM_COMMON_TABLE_HEADER; ++ ++typedef struct _ATOM_ROM_HEADER ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios, ++ atombios should init it as "ATOM", don't change the position */ ++ USHORT usBiosRuntimeSegmentAddress; ++ USHORT usProtectedModeInfoOffset; ++ USHORT usConfigFilenameOffset; ++ USHORT usCRC_BlockOffset; ++ USHORT usBIOS_BootupMessageOffset; ++ USHORT usInt10Offset; ++ USHORT usPciBusDevInitCode; ++ USHORT usIoBaseAddress; ++ USHORT usSubsystemVendorID; ++ USHORT usSubsystemID; ++ USHORT usPCI_InfoOffset; ++ USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */ ++ USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */ ++ UCHAR ucExtendedFunctionCode; ++ UCHAR ucReserved; ++}ATOM_ROM_HEADER; ++ ++/*==============================Command Table Portion==================================== */ ++ ++#ifdef UEFI_BUILD ++ #define UTEMP USHORT ++ #define USHORT void* ++#endif ++ ++typedef struct 
_ATOM_MASTER_LIST_OF_COMMAND_TABLES{ ++ USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 ++ USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON ++ USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init ++ USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios ++ USHORT DIGxEncoderControl; //Only used by Bios ++ USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init ++ USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1 ++ USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed ++ USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2 ++ USHORT GPIOPinControl; //Atomic Table, only used by Bios ++ USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1 ++ USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1 ++ USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2 ++ USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init ++ USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT MemoryPLLInit; ++ USHORT AdjustDisplayPll; //only used by Bios ++ USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios ++ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios ++ USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest 
version 1.2 ++ USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3 ++ USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT CV1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT GetConditionalGoldenSetting; //only used by Bios ++ USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1 ++ USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3 ++ USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3 ++ USHORT TV1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT EnableScaler; //Atomic Table, used only by Bios ++ USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1 ++ USHORT EnableVGA_Access; //Obsolete , only used by Bios ++ USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1 ++ USHORT SetCRTC_Replication; //Atomic Table, used only by Bios ++ USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT 
EnableGraphSurfaces; //Atomic Table, used only by Bios ++ USHORT UpdateCRTC_DoubleBufferRegisters; ++ USHORT LUT_AutoFill; //Atomic Table, only used by Bios ++ USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios ++ USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1 ++ USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios ++ USHORT MemoryCleanUp; //Atomic Table, only used by Bios ++ USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios ++ USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components ++ USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components ++ USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init ++ USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock ++ USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock ++ USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios ++ USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT MemoryTraining; //Atomic Table, used only by Bios ++ USHORT 
EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2 ++ USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1 ++ USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" ++ USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init ++ USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender ++ USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1 ++ USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1 ++ USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1 ++ USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1 ++ USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios ++ USHORT DPEncoderService; //Function Table,only used by Bios ++}ATOM_MASTER_LIST_OF_COMMAND_TABLES; ++ ++// For backward compatible ++#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction ++#define UNIPHYTransmitterControl DIG1TransmitterControl ++#define LVTMATransmitterControl DIG2TransmitterControl ++#define SetCRTC_DPM_State GetConditionalGoldenSetting ++#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange ++ ++typedef struct _ATOM_MASTER_COMMAND_TABLE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ 
ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables; ++}ATOM_MASTER_COMMAND_TABLE; ++ ++/****************************************************************************/ ++// Structures used in every command table ++/****************************************************************************/ ++typedef struct _ATOM_TABLE_ATTRIBUTE ++{ ++#if ATOM_BIG_ENDIAN ++ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag ++ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword), ++ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword), ++#else ++ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword), ++ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword), ++ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag ++#endif ++}ATOM_TABLE_ATTRIBUTE; ++ ++typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS ++{ ++ ATOM_TABLE_ATTRIBUTE sbfAccess; ++ USHORT susAccess; ++}ATOM_TABLE_ATTRIBUTE_ACCESS; ++ ++/****************************************************************************/ ++// Common header for all command tables. ++// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. ++// And the pointer actually points to this header. 
++/****************************************************************************/ ++typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER ++{ ++ ATOM_COMMON_TABLE_HEADER CommonHeader; ++ ATOM_TABLE_ATTRIBUTE TableAttribute; ++}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER; ++ ++/****************************************************************************/ ++// Structures used by ComputeMemoryEnginePLLTable ++/****************************************************************************/ ++#define COMPUTE_MEMORY_PLL_PARAM 1 ++#define COMPUTE_ENGINE_PLL_PARAM 2 ++ ++typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS ++{ ++ ULONG ulClock; //When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div ++ UCHAR ucAction; //0:reserved //1:Memory //2:Engine ++ UCHAR ucReserved; //may expand to return larger Fbdiv later ++ UCHAR ucFbDiv; //return value ++ UCHAR ucPostDiv; //return value ++}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS; ++ ++typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 ++{ ++ ULONG ulClock; //When return, [23:0] return real clock ++ UCHAR ucAction; //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. 
it return ref_div to be written to register ++ USHORT usFbDiv; //return Feedback value to be written to register ++ UCHAR ucPostDiv; //return post div to be written to register ++}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2; ++#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS ++ ++ ++#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value ++#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) ++#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition ++#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change ++#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup ++#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL ++#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK ++ ++#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) ++#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition ++#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change ++#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup 
++#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL ++ ++typedef struct _ATOM_COMPUTE_CLOCK_FREQ ++{ ++#if ATOM_BIG_ENDIAN ++ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM ++ ULONG ulClockFreq:24; // in unit of 10kHz ++#else ++ ULONG ulClockFreq:24; // in unit of 10kHz ++ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM ++#endif ++}ATOM_COMPUTE_CLOCK_FREQ; ++ ++typedef struct _ATOM_S_MPLL_FB_DIVIDER ++{ ++ USHORT usFbDivFrac; ++ USHORT usFbDiv; ++}ATOM_S_MPLL_FB_DIVIDER; ++ ++typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 ++{ ++ union ++ { ++ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter ++ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter ++ }; ++ UCHAR ucRefDiv; //Output Parameter ++ UCHAR ucPostDiv; //Output Parameter ++ UCHAR ucCntlFlag; //Output Parameter ++ UCHAR ucReserved; ++}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3; ++ ++// ucCntlFlag ++#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1 ++#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2 ++#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4 ++ ++typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER ++{ ++ ATOM_COMPUTE_CLOCK_FREQ ulClock; ++ ULONG ulReserved[2]; ++}DYNAMICE_MEMORY_SETTINGS_PARAMETER; ++ ++typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER ++{ ++ ATOM_COMPUTE_CLOCK_FREQ ulClock; ++ ULONG ulMemoryClock; ++ ULONG ulReserved; ++}DYNAMICE_ENGINE_SETTINGS_PARAMETER; ++ ++/****************************************************************************/ ++// Structures used by SetEngineClockTable ++/****************************************************************************/ ++typedef struct _SET_ENGINE_CLOCK_PARAMETERS ++{ ++ ULONG ulTargetEngineClock; //In 10Khz unit ++}SET_ENGINE_CLOCK_PARAMETERS; ++ ++typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION ++{ ++ ULONG ulTargetEngineClock; //In 10Khz unit ++ 
COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; ++}SET_ENGINE_CLOCK_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structures used by SetMemoryClockTable ++/****************************************************************************/ ++typedef struct _SET_MEMORY_CLOCK_PARAMETERS ++{ ++ ULONG ulTargetMemoryClock; //In 10Khz unit ++}SET_MEMORY_CLOCK_PARAMETERS; ++ ++typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION ++{ ++ ULONG ulTargetMemoryClock; //In 10Khz unit ++ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; ++}SET_MEMORY_CLOCK_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structures used by ASIC_Init.ctb ++/****************************************************************************/ ++typedef struct _ASIC_INIT_PARAMETERS ++{ ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++}ASIC_INIT_PARAMETERS; ++ ++typedef struct _ASIC_INIT_PS_ALLOCATION ++{ ++ ASIC_INIT_PARAMETERS sASICInitClocks; ++ SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure ++}ASIC_INIT_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structure used by DynamicClockGatingTable.ctb ++/****************************************************************************/ ++typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS ++{ ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucPadding[3]; ++}DYNAMIC_CLOCK_GATING_PARAMETERS; ++#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS ++ ++/****************************************************************************/ ++// Structure used by EnableASIC_StaticPwrMgtTable.ctb ++/****************************************************************************/ ++typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS ++{ ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ 
UCHAR ucPadding[3]; ++}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS; ++#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by DAC_LoadDetectionTable.ctb ++/****************************************************************************/ ++typedef struct _DAC_LOAD_DETECTION_PARAMETERS ++{ ++ USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} ++ UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} ++ UCHAR ucMisc; //Valid only when table revision =1.3 and above ++}DAC_LOAD_DETECTION_PARAMETERS; ++ ++// DAC_LOAD_DETECTION_PARAMETERS.ucMisc ++#define DAC_LOAD_MISC_YPrPb 0x01 ++ ++typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION ++{ ++ DAC_LOAD_DETECTION_PARAMETERS sDacload; ++ ULONG Reserved[2];// Don't set this one, allocation for EXT DAC ++}DAC_LOAD_DETECTION_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb ++/****************************************************************************/ ++typedef struct _DAC_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) ++ UCHAR ucAction; // 0: turn off encoder ++ // 1: setup and turn on encoder ++ // 7: ATOM_ENCODER_INIT Initialize DAC ++}DAC_ENCODER_CONTROL_PARAMETERS; ++ ++#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by DIG1EncoderControlTable ++// DIG2EncoderControlTable ++// ExternalEncoderControlTable ++/****************************************************************************/ ++typedef struct 
_DIG_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucConfig; ++ // [2] Link Select: ++ // =0: PHY linkA if bfLane<3 ++ // =1: PHY linkB if bfLanes<3 ++ // =0: PHY linkA+B if bfLanes=3 ++ // [3] Transmitter Sel ++ // =0: UNIPHY or PCIEPHY ++ // =1: LVTMA ++ UCHAR ucAction; // =0: turn off encoder ++ // =1: turn on encoder ++ UCHAR ucEncoderMode; ++ // =0: DP encoder ++ // =1: LVDS encoder ++ // =2: DVI encoder ++ // =3: HDMI encoder ++ // =4: SDVO encoder ++ UCHAR ucLaneNum; // how many lanes to enable ++ UCHAR ucReserved[2]; ++}DIG_ENCODER_CONTROL_PARAMETERS; ++#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS ++#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS ++ ++//ucConfig ++#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 ++#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 ++#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 ++#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04 ++#define ATOM_ENCODER_CONFIG_LINKA 0x00 ++#define ATOM_ENCODER_CONFIG_LINKB 0x04 ++#define ATOM_ENCODER_CONFIG_LINKA_B ATOM_TRANSMITTER_CONFIG_LINKA ++#define ATOM_ENCODER_CONFIG_LINKB_A ATOM_ENCODER_CONFIG_LINKB ++#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK 0x08 ++#define ATOM_ENCODER_CONFIG_UNIPHY 0x00 ++#define ATOM_ENCODER_CONFIG_LVTMA 0x08 ++#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00 ++#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08 ++#define ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS Internal use, outside SW should set this bit=0 ++// ucAction ++// ATOM_ENABLE: Enable Encoder ++// ATOM_DISABLE: Disable Encoder ++ ++//ucEncoderMode ++#define ATOM_ENCODER_MODE_DP 0 ++#define ATOM_ENCODER_MODE_LVDS 1 ++#define ATOM_ENCODER_MODE_DVI 2 ++#define ATOM_ENCODER_MODE_HDMI 3 ++#define ATOM_ENCODER_MODE_SDVO 4 ++#define ATOM_ENCODER_MODE_TV 13 ++#define ATOM_ENCODER_MODE_CV 14 ++#define ATOM_ENCODER_MODE_CRT 15 ++ ++typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 ++{ ++#if ATOM_BIG_ENDIAN ++ UCHAR 
ucReserved1:2; ++ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF ++ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F ++ UCHAR ucReserved:1; ++ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz ++#else ++ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz ++ UCHAR ucReserved:1; ++ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F ++ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF ++ UCHAR ucReserved1:2; ++#endif ++}ATOM_DIG_ENCODER_CONFIG_V2; ++ ++ ++typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ ATOM_DIG_ENCODER_CONFIG_V2 acConfig; ++ UCHAR ucAction; ++ UCHAR ucEncoderMode; ++ // =0: DP encoder ++ // =1: LVDS encoder ++ // =2: DVI encoder ++ // =3: HDMI encoder ++ // =4: SDVO encoder ++ UCHAR ucLaneNum; // how many lanes to enable ++ UCHAR ucReserved[2]; ++}DIG_ENCODER_CONTROL_PARAMETERS_V2; ++ ++//ucConfig ++#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01 ++#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00 ++#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01 ++#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK 0x04 ++#define ATOM_ENCODER_CONFIG_V2_LINKA 0x00 ++#define ATOM_ENCODER_CONFIG_V2_LINKB 0x04 ++#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK 0x18 ++#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1 0x00 ++#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08 ++#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10 ++ ++/****************************************************************************/ ++// Structures used by UNIPHYTransmitterControlTable ++// LVTMATransmitterControlTable ++// DVOOutputControlTable ++/****************************************************************************/ ++typedef struct _ATOM_DP_VS_MODE ++{ ++ UCHAR ucLaneSel; ++ UCHAR ucLaneSet; ++}ATOM_DP_VS_MODE; ++ ++typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS ++{ ++ union ++ { ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ USHORT usInitInfo; // when init 
uniphy,lower 8bit is used for connector type defined in objectid.h ++ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode ++ }; ++ UCHAR ucConfig; ++ // [0]=0: 4 lane Link, ++ // =1: 8 lane Link ( Dual Links TMDS ) ++ // [1]=0: InCoherent mode ++ // =1: Coherent Mode ++ // [2] Link Select: ++ // =0: PHY linkA if bfLane<3 ++ // =1: PHY linkB if bfLanes<3 ++ // =0: PHY linkA+B if bfLanes=3 ++ // [5:4]PCIE lane Sel ++ // =0: lane 0~3 or 0~7 ++ // =1: lane 4~7 ++ // =2: lane 8~11 or 8~15 ++ // =3: lane 12~15 ++ UCHAR ucAction; // =0: turn off encoder ++ // =1: turn on encoder ++ UCHAR ucReserved[4]; ++}DIG_TRANSMITTER_CONTROL_PARAMETERS; ++ ++#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS ++ ++//ucInitInfo ++#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff ++ ++//ucConfig ++#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01 ++#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02 ++#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04 ++#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00 ++#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04 ++#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00 ++#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04 ++ ++#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE ++#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE ++#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE ++ ++#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30 ++#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00 ++#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE 0x20 ++#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN 0x30 ++#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK 0xc0 ++#define ATOM_TRANSMITTER_CONFIG_LANE_0_3 0x00 ++#define ATOM_TRANSMITTER_CONFIG_LANE_0_7 0x00 ++#define ATOM_TRANSMITTER_CONFIG_LANE_4_7 0x40 ++#define ATOM_TRANSMITTER_CONFIG_LANE_8_11 0x80 ++#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80 ++#define 
ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0 ++ ++//ucAction ++#define ATOM_TRANSMITTER_ACTION_DISABLE 0 ++#define ATOM_TRANSMITTER_ACTION_ENABLE 1 ++#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2 ++#define ATOM_TRANSMITTER_ACTION_LCD_BLON 3 ++#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL 4 ++#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START 5 ++#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP 6 ++#define ATOM_TRANSMITTER_ACTION_INIT 7 ++#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT 8 ++#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9 ++#define ATOM_TRANSMITTER_ACTION_SETUP 10 ++#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11 ++ ++ ++// Following are used for DigTransmitterControlTable ver1.2 ++typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 ++{ ++#if ATOM_BIG_ENDIAN ++ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) ++ // =1 Dig Transmitter 2 ( Uniphy CD ) ++ // =2 Dig Transmitter 3 ( Uniphy EF ) ++ UCHAR ucReserved:1; ++ UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector ++ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) ++ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E ++ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F ++ ++ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) ++ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector ++#else ++ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector ++ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) ++ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E ++ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. 
when fDualLinkConnector=1, it means master link of dual link is B or D or F ++ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) ++ UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector ++ UCHAR ucReserved:1; ++ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) ++ // =1 Dig Transmitter 2 ( Uniphy CD ) ++ // =2 Dig Transmitter 3 ( Uniphy EF ) ++#endif ++}ATOM_DIG_TRANSMITTER_CONFIG_V2; ++ ++//ucConfig ++//Bit0 ++#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01 ++ ++//Bit1 ++#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02 ++ ++//Bit2 ++#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04 ++#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00 ++#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04 ++ ++// Bit3 ++#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08 ++#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP ++#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP ++ ++// Bit4 ++#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10 ++ ++// Bit7:6 ++#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0 ++#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB ++#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD ++#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF ++ ++typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 ++{ ++ union ++ { ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h ++ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode ++ }; ++ ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig; ++ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX ++ UCHAR ucReserved[4]; ++}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2; ++ ++ 
++/****************************************************************************/ ++// Structures used by DAC1OuputControlTable ++// DAC2OuputControlTable ++// LVTMAOutputControlTable (Before DEC30) ++// TMDSAOutputControlTable (Before DEC30) ++/****************************************************************************/ ++typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++{ ++ UCHAR ucAction; // Possible input:ATOM_ENABLE||ATOMDISABLE ++ // When the display is LCD, in addition to above: ++ // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| ++ // ATOM_LCD_SELFTEST_STOP ++ ++ UCHAR aucPadding[3]; // padding to DWORD aligned ++}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS; ++ ++#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++ ++ ++#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION ++ ++#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION ++ ++#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++#define CV1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION ++ ++#define TV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++#define TV1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION ++ ++#define DFP1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION ++ ++#define DFP2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION ++ ++#define LCD1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION 
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION ++ ++#define DVO_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION ++#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by BlankCRTCTable ++/****************************************************************************/ ++typedef struct _BLANK_CRTC_PARAMETERS ++{ ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucBlanking; // ATOM_BLANKING or ATOM_BLANKINGOFF ++ USHORT usBlackColorRCr; ++ USHORT usBlackColorGY; ++ USHORT usBlackColorBCb; ++}BLANK_CRTC_PARAMETERS; ++#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by EnableCRTCTable ++// EnableCRTCMemReqTable ++// UpdateCRTC_DoubleBufferRegistersTable ++/****************************************************************************/ ++typedef struct _ENABLE_CRTC_PARAMETERS ++{ ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucPadding[2]; ++}ENABLE_CRTC_PARAMETERS; ++#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by SetCRTC_OverScanTable ++/****************************************************************************/ ++typedef struct _SET_CRTC_OVERSCAN_PARAMETERS ++{ ++ USHORT usOverscanRight; // right ++ USHORT usOverscanLeft; // left ++ USHORT usOverscanBottom; // bottom ++ USHORT usOverscanTop; // top ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucPadding[3]; ++}SET_CRTC_OVERSCAN_PARAMETERS; ++#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS ++ ++/****************************************************************************/ ++// Structures 
used by SetCRTC_ReplicationTable ++/****************************************************************************/ ++typedef struct _SET_CRTC_REPLICATION_PARAMETERS ++{ ++ UCHAR ucH_Replication; // horizontal replication ++ UCHAR ucV_Replication; // vertical replication ++ UCHAR usCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucPadding; ++}SET_CRTC_REPLICATION_PARAMETERS; ++#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by SelectCRTC_SourceTable ++/****************************************************************************/ ++typedef struct _SELECT_CRTC_SOURCE_PARAMETERS ++{ ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucDevice; // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... ++ UCHAR ucPadding[2]; ++}SELECT_CRTC_SOURCE_PARAMETERS; ++#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS ++ ++typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 ++{ ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO ++ UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO ++ UCHAR ucPadding; ++}SELECT_CRTC_SOURCE_PARAMETERS_V2; ++ ++//ucEncoderID ++//#define ASIC_INT_DAC1_ENCODER_ID 0x00 ++//#define ASIC_INT_TV_ENCODER_ID 0x02 ++//#define ASIC_INT_DIG1_ENCODER_ID 0x03 ++//#define ASIC_INT_DAC2_ENCODER_ID 0x04 ++//#define ASIC_EXT_TV_ENCODER_ID 0x06 ++//#define ASIC_INT_DVO_ENCODER_ID 0x07 ++//#define ASIC_INT_DIG2_ENCODER_ID 0x09 ++//#define ASIC_EXT_DIG_ENCODER_ID 0x05 ++ ++//ucEncodeMode ++//#define ATOM_ENCODER_MODE_DP 0 ++//#define ATOM_ENCODER_MODE_LVDS 1 ++//#define ATOM_ENCODER_MODE_DVI 2 ++//#define ATOM_ENCODER_MODE_HDMI 3 ++//#define ATOM_ENCODER_MODE_SDVO 4 ++//#define ATOM_ENCODER_MODE_TV 13 ++//#define ATOM_ENCODER_MODE_CV 14 ++//#define ATOM_ENCODER_MODE_CRT 15 ++ ++/****************************************************************************/ ++// Structures 
used by SetPixelClockTable ++// GetPixelClockTable ++/****************************************************************************/ ++//Major revision=1., Minor revision=1 ++typedef struct _PIXEL_CLOCK_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) ++ // 0 means disable PPLL ++ USHORT usRefDiv; // Reference divider ++ USHORT usFbDiv; // feedback divider ++ UCHAR ucPostDiv; // post divider ++ UCHAR ucFracFbDiv; // fractional feedback divider ++ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 ++ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER ++ UCHAR ucCRTC; // Which CRTC uses this Ppll ++ UCHAR ucPadding; ++}PIXEL_CLOCK_PARAMETERS; ++ ++//Major revision=1., Minor revision=2, add ucMiscIfno ++//ucMiscInfo: ++#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1 ++#define MISC_DEVICE_INDEX_MASK 0xF0 ++#define MISC_DEVICE_INDEX_SHIFT 4 ++ ++typedef struct _PIXEL_CLOCK_PARAMETERS_V2 ++{ ++ USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) ++ // 0 means disable PPLL ++ USHORT usRefDiv; // Reference divider ++ USHORT usFbDiv; // feedback divider ++ UCHAR ucPostDiv; // post divider ++ UCHAR ucFracFbDiv; // fractional feedback divider ++ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 ++ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER ++ UCHAR ucCRTC; // Which CRTC uses this Ppll ++ UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog ++}PIXEL_CLOCK_PARAMETERS_V2; ++ ++//Major revision=1., Minor revision=3, structure/definition change ++//ucEncoderMode: ++//ATOM_ENCODER_MODE_DP ++//ATOM_ENOCDER_MODE_LVDS ++//ATOM_ENOCDER_MODE_DVI ++//ATOM_ENOCDER_MODE_HDMI ++//ATOM_ENOCDER_MODE_SDVO ++//ATOM_ENCODER_MODE_TV 13 ++//ATOM_ENCODER_MODE_CV 14 ++//ATOM_ENCODER_MODE_CRT 15 ++ ++//ucDVOConfig ++//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01 ++//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 ++//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 
++//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c ++//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00 ++//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 ++//#define DVO_ENCODER_CONFIG_24BIT 0x08 ++ ++//ucMiscInfo: also changed, see below ++#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01 ++#define PIXEL_CLOCK_MISC_VGA_MODE 0x02 ++#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04 ++#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00 ++#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04 ++#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08 ++ ++typedef struct _PIXEL_CLOCK_PARAMETERS_V3 ++{ ++ USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) ++ // 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. ++ USHORT usRefDiv; // Reference divider ++ USHORT usFbDiv; // feedback divider ++ UCHAR ucPostDiv; // post divider ++ UCHAR ucFracFbDiv; // fractional feedback divider ++ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 ++ UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h ++ union ++ { ++ UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ ++ UCHAR ucDVOConfig; // when use DVO, need to know SDR/DDR, 12bit or 24bit ++ }; ++ UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel ++ // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source ++}PIXEL_CLOCK_PARAMETERS_V3; ++ ++#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2 ++#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST ++ ++/****************************************************************************/ ++// Structures used by AdjustDisplayPllTable ++/****************************************************************************/ ++typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS ++{ ++ USHORT usPixelClock; ++ UCHAR ucTransmitterID; ++ UCHAR ucEncodeMode; ++ union ++ { ++ UCHAR ucDVOConfig; //if DVO, need passing link rate and output 12bitlow or 24bit ++ UCHAR ucConfig; //if none DVO, 
not defined yet ++ }; ++ UCHAR ucReserved[3]; ++}ADJUST_DISPLAY_PLL_PARAMETERS; ++ ++#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10 ++ ++#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by EnableYUVTable ++/****************************************************************************/ ++typedef struct _ENABLE_YUV_PARAMETERS ++{ ++ UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) ++ UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format ++ UCHAR ucPadding[2]; ++}ENABLE_YUV_PARAMETERS; ++#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by GetMemoryClockTable ++/****************************************************************************/ ++typedef struct _GET_MEMORY_CLOCK_PARAMETERS ++{ ++ ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit ++} GET_MEMORY_CLOCK_PARAMETERS; ++#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by GetEngineClockTable ++/****************************************************************************/ ++typedef struct _GET_ENGINE_CLOCK_PARAMETERS ++{ ++ ULONG ulReturnEngineClock; // current engine speed in 10KHz unit ++} GET_ENGINE_CLOCK_PARAMETERS; ++#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS ++ ++/****************************************************************************/ ++// Following Structures and constant may be obsolete ++/****************************************************************************/ ++//Maxium 8 bytes,the data read in will be placed in the parameter space. 
++//Read operaion successeful when the paramter space is non-zero, otherwise read operation failed ++typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS ++{ ++ USHORT usPrescale; //Ratio between Engine clock and I2C clock ++ USHORT usVRAMAddress; //Adress in Frame Buffer where to pace raw EDID ++ USHORT usStatus; //When use output: lower byte EDID checksum, high byte hardware status ++ //WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte ++ UCHAR ucSlaveAddr; //Read from which slave ++ UCHAR ucLineNumber; //Read from which HW assisted line ++}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS; ++#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS ++ ++ ++#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0 ++#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1 ++#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2 ++#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3 ++#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4 ++ ++typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS ++{ ++ USHORT usPrescale; //Ratio between Engine clock and I2C clock ++ USHORT usByteOffset; //Write to which byte ++ //Upper portion of usByteOffset is Format of data ++ //1bytePS+offsetPS ++ //2bytesPS+offsetPS ++ //blockID+offsetPS ++ //blockID+offsetID ++ //blockID+counterID+offsetID ++ UCHAR ucData; //PS data1 ++ UCHAR ucStatus; //Status byte 1=success, 2=failure, Also is used as PS data2 ++ UCHAR ucSlaveAddr; //Write to which slave ++ UCHAR ucLineNumber; //Write from which HW assisted line ++}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS; ++ ++#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS ++ ++typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS ++{ ++ USHORT usPrescale; //Ratio between Engine clock and I2C clock ++ UCHAR ucSlaveAddr; //Write to which slave ++ UCHAR ucLineNumber; //Write from which HW assisted line ++}SET_UP_HW_I2C_DATA_PARAMETERS; ++ ++ 
++/**************************************************************************/ ++#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS ++ ++/****************************************************************************/ ++// Structures used by PowerConnectorDetectionTable ++/****************************************************************************/ ++typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS ++{ ++ UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected ++ UCHAR ucPwrBehaviorId; ++ USHORT usPwrBudget; //how much power currently boot to in unit of watt ++}POWER_CONNECTOR_DETECTION_PARAMETERS; ++ ++typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION ++{ ++ UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected ++ UCHAR ucReserved; ++ USHORT usPwrBudget; //how much power currently boot to in unit of watt ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; ++}POWER_CONNECTOR_DETECTION_PS_ALLOCATION; ++ ++/****************************LVDS SS Command Table Definitions**********************/ ++ ++/****************************************************************************/ ++// Structures used by EnableSpreadSpectrumOnPPLLTable ++/****************************************************************************/ ++typedef struct _ENABLE_LVDS_SS_PARAMETERS ++{ ++ USHORT usSpreadSpectrumPercentage; ++ UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD ++ UCHAR ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY ++ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucPadding[3]; ++}ENABLE_LVDS_SS_PARAMETERS; ++ ++//ucTableFormatRevision=1,ucTableContentRevision=2 ++typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 ++{ ++ USHORT usSpreadSpectrumPercentage; ++ UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. 
Others:TBD ++ UCHAR ucSpreadSpectrumStep; // ++ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucSpreadSpectrumDelay; ++ UCHAR ucSpreadSpectrumRange; ++ UCHAR ucPadding; ++}ENABLE_LVDS_SS_PARAMETERS_V2; ++ ++//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. ++typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL ++{ ++ USHORT usSpreadSpectrumPercentage; ++ UCHAR ucSpreadSpectrumType; // Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD ++ UCHAR ucSpreadSpectrumStep; // ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucSpreadSpectrumDelay; ++ UCHAR ucSpreadSpectrumRange; ++ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2 ++}ENABLE_SPREAD_SPECTRUM_ON_PPLL; ++ ++#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL ++ ++/**************************************************************************/ ++ ++typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION ++{ ++ PIXEL_CLOCK_PARAMETERS sPCLKInput; ++ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion ++}SET_PIXEL_CLOCK_PS_ALLOCATION; ++ ++#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION ++ ++/****************************************************************************/ ++// Structures used by ### ++/****************************************************************************/ ++typedef struct _MEMORY_TRAINING_PARAMETERS ++{ ++ ULONG ulTargetMemoryClock; //In 10Khz unit ++}MEMORY_TRAINING_PARAMETERS; ++#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS ++ ++ ++/****************************LVDS and other encoder command table definitions **********************/ ++ ++ ++/****************************************************************************/ ++// Structures used by LVDSEncoderControlTable (Before DCE30) ++// LVTMAEncoderControlTable (Before DCE30) ++// TMDSAEncoderControlTable (Before DCE30) 
++/****************************************************************************/ ++typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucMisc; // bit0=0: Enable single link ++ // =1: Enable dual link ++ // Bit1=0: 666RGB ++ // =1: 888RGB ++ UCHAR ucAction; // 0: turn off encoder ++ // 1: setup and turn on encoder ++}LVDS_ENCODER_CONTROL_PARAMETERS; ++ ++#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS ++ ++#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS ++#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS ++ ++#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS ++#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS ++ ++ ++//ucTableFormatRevision=1,ucTableContentRevision=2 ++typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx defintions below ++ UCHAR ucAction; // 0: turn off encoder ++ // 1: setup and turn on encoder ++ UCHAR ucTruncate; // bit0=0: Disable truncate ++ // =1: Enable truncate ++ // bit4=0: 666RGB ++ // =1: 888RGB ++ UCHAR ucSpatial; // bit0=0: Disable spatial dithering ++ // =1: Enable spatial dithering ++ // bit4=0: 666RGB ++ // =1: 888RGB ++ UCHAR ucTemporal; // bit0=0: Disable temporal dithering ++ // =1: Enable temporal dithering ++ // bit4=0: 666RGB ++ // =1: 888RGB ++ // bit5=0: Gray level 2 ++ // =1: Gray level 4 ++ UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E ++ // =1: 25FRC_SEL pattern F ++ // bit6:5=0: 50FRC_SEL pattern A ++ // =1: 50FRC_SEL pattern B ++ // =2: 50FRC_SEL pattern C ++ // =3: 50FRC_SEL pattern D ++ // bit7=0: 75FRC_SEL pattern E ++ // =1: 75FRC_SEL pattern F ++}LVDS_ENCODER_CONTROL_PARAMETERS_V2; ++ ++#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 ++ ++#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 
LVDS_ENCODER_CONTROL_PARAMETERS_V2 ++#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 ++ ++#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 ++#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2 ++ ++#define LVDS_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V2 ++#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3 ++ ++#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3 ++#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3 ++ ++#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3 ++#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3 ++ ++/****************************************************************************/ ++// Structures used by ### ++/****************************************************************************/ ++typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS ++{ ++ UCHAR ucEnable; // Enable or Disable External TMDS encoder ++ UCHAR ucMisc; // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} ++ UCHAR ucPadding[2]; ++}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS; ++ ++typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ++{ ++ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion ++}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION; ++ ++#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 ++ ++typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 ++{ ++ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion ++}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2; ++ ++typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION ++{ ++ 
DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; ++}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structures used by DVOEncoderControlTable ++/****************************************************************************/ ++//ucTableFormatRevision=1,ucTableContentRevision=3 ++ ++//ucDVOConfig: ++#define DVO_ENCODER_CONFIG_RATE_SEL 0x01 ++#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 ++#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 ++#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c ++#define DVO_ENCODER_CONFIG_LOW12BIT 0x00 ++#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 ++#define DVO_ENCODER_CONFIG_24BIT 0x08 ++ ++typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 ++{ ++ USHORT usPixelClock; ++ UCHAR ucDVOConfig; ++ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT ++ UCHAR ucReseved[4]; ++}DVO_ENCODER_CONTROL_PARAMETERS_V3; ++#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3 ++ ++//ucTableFormatRevision=1 ++//ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for ++// bit1=0: non-coherent mode ++// =1: coherent mode ++ ++//========================================================================================== ++//Only change is here next time when changing encoder parameter definitions again! 
++#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3 ++#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST ++ ++#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3 ++#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST ++ ++#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3 ++#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST ++ ++#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS ++#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION ++ ++//========================================================================================== ++#define PANEL_ENCODER_MISC_DUAL 0x01 ++#define PANEL_ENCODER_MISC_COHERENT 0x02 ++#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04 ++#define PANEL_ENCODER_MISC_HDMI_TYPE 0x08 ++ ++#define PANEL_ENCODER_ACTION_DISABLE ATOM_DISABLE ++#define PANEL_ENCODER_ACTION_ENABLE ATOM_ENABLE ++#define PANEL_ENCODER_ACTION_COHERENTSEQ (ATOM_ENABLE+1) ++ ++#define PANEL_ENCODER_TRUNCATE_EN 0x01 ++#define PANEL_ENCODER_TRUNCATE_DEPTH 0x10 ++#define PANEL_ENCODER_SPATIAL_DITHER_EN 0x01 ++#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH 0x10 ++#define PANEL_ENCODER_TEMPORAL_DITHER_EN 0x01 ++#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH 0x10 ++#define PANEL_ENCODER_TEMPORAL_LEVEL_4 0x20 ++#define PANEL_ENCODER_25FRC_MASK 0x10 ++#define PANEL_ENCODER_25FRC_E 0x00 ++#define PANEL_ENCODER_25FRC_F 0x10 ++#define PANEL_ENCODER_50FRC_MASK 0x60 ++#define PANEL_ENCODER_50FRC_A 0x00 ++#define PANEL_ENCODER_50FRC_B 0x20 ++#define PANEL_ENCODER_50FRC_C 0x40 ++#define PANEL_ENCODER_50FRC_D 0x60 ++#define PANEL_ENCODER_75FRC_MASK 0x80 ++#define PANEL_ENCODER_75FRC_E 0x00 ++#define PANEL_ENCODER_75FRC_F 0x80 ++ ++/****************************************************************************/ ++// Structures used by SetVoltageTable 
++/****************************************************************************/ ++#define SET_VOLTAGE_TYPE_ASIC_VDDC 1 ++#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2 ++#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3 ++#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4 ++#define SET_VOLTAGE_INIT_MODE 5 ++#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the Max. voltage for the soldered Asic ++ ++#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1 ++#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2 ++#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4 ++ ++#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0 ++#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1 ++#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2 ++ ++typedef struct _SET_VOLTAGE_PARAMETERS ++{ ++ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ ++ UCHAR ucVoltageMode; // To set all, to set source A or source B or ... ++ UCHAR ucVoltageIndex; // An index to tell which voltage level ++ UCHAR ucReserved; ++}SET_VOLTAGE_PARAMETERS; ++ ++typedef struct _SET_VOLTAGE_PARAMETERS_V2 ++{ ++ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ ++ UCHAR ucVoltageMode; // Not used, maybe use for state machine for differen power mode ++ USHORT usVoltageLevel; // real voltage level ++}SET_VOLTAGE_PARAMETERS_V2; ++ ++typedef struct _SET_VOLTAGE_PS_ALLOCATION ++{ ++ SET_VOLTAGE_PARAMETERS sASICSetVoltage; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; ++}SET_VOLTAGE_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structures used by TVEncoderControlTable ++/****************************************************************************/ ++typedef struct _TV_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucTvStandard; // See definition "ATOM_TV_NTSC ..." 
++ UCHAR ucAction; // 0: turn off encoder ++ // 1: setup and turn on encoder ++}TV_ENCODER_CONTROL_PARAMETERS; ++ ++typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION ++{ ++ TV_ENCODER_CONTROL_PARAMETERS sTVEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one ++}TV_ENCODER_CONTROL_PS_ALLOCATION; ++ ++//==============================Data Table Portion==================================== ++ ++#ifdef UEFI_BUILD ++ #define UTEMP USHORT ++ #define USHORT void* ++#endif ++ ++/****************************************************************************/ ++// Structure used in Data.mtb ++/****************************************************************************/ ++typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES ++{ ++ USHORT UtilityPipeLine; // Offest for the utility to get parser info,Don't change this position! ++ USHORT MultimediaCapabilityInfo; // Only used by MM Lib,latest version 1.1, not configuable from Bios, need to include the table to build Bios ++ USHORT MultimediaConfigInfo; // Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios ++ USHORT StandardVESA_Timing; // Only used by Bios ++ USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 ++ USHORT DAC_Info; // Will be obsolete from R600 ++ USHORT LVDS_Info; // Shared by various SW components,latest version 1.1 ++ USHORT TMDS_Info; // Will be obsolete from R600 ++ USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 ++ USHORT SupportedDevicesInfo; // Will be obsolete from R600 ++ USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600 ++ USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600 ++ USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1 ++ USHORT VESA_ToInternalModeLUT; // Only used by Bios ++ USHORT ComponentVideoInfo; // Shared by various SW 
components,latest version 2.1 will be used from R600 ++ USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600 ++ USHORT CompassionateData; // Will be obsolete from R600 ++ USHORT SaveRestoreInfo; // Only used by Bios ++ USHORT PPLL_SS_Info; // Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info ++ USHORT OemInfo; // Defined and used by external SW, should be obsolete soon ++ USHORT XTMDS_Info; // Will be obsolete from R600 ++ USHORT MclkSS_Info; // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used ++ USHORT Object_Header; // Shared by various SW components,latest version 1.1 ++ USHORT IndirectIOAccess; // Only used by Bios,this table position can't change at all!! ++ USHORT MC_InitParameter; // Only used by command table ++ USHORT ASIC_VDDC_Info; // Will be obsolete from R600 ++ USHORT ASIC_InternalSS_Info; // New tabel name from R600, used to be called "ASIC_MVDDC_Info" ++ USHORT TV_VideoMode; // Only used by command table ++ USHORT VRAM_Info; // Only used by command table, latest version 1.3 ++ USHORT MemoryTrainingInfo; // Used for VBIOS and Diag utility for memory training purpose since R600. 
the new table rev start from 2.1 ++ USHORT IntegratedSystemInfo; // Shared by various SW components ++ USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 ++ USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1 ++ USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1 ++}ATOM_MASTER_LIST_OF_DATA_TABLES; ++ ++#ifdef UEFI_BUILD ++ #define USHORT UTEMP ++#endif ++ ++typedef struct _ATOM_MASTER_DATA_TABLE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; ++}ATOM_MASTER_DATA_TABLE; ++ ++/****************************************************************************/ ++// Structure used in MultimediaCapabilityInfoTable ++/****************************************************************************/ ++typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulSignature; // HW info table signature string "$ATI" ++ UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) ++ UCHAR ucTV_OutInfo; // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) ++ UCHAR ucVideoPortInfo; // Provides the video port capabilities ++ UCHAR ucHostPortInfo; // Provides host port configuration information ++}ATOM_MULTIMEDIA_CAPABILITY_INFO; ++ ++/****************************************************************************/ ++// Structure used in MultimediaConfigInfoTable ++/****************************************************************************/ ++typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulSignature; // MM info table signature sting "$MMT" ++ UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) ++ UCHAR ucAudioChipInfo; // List the audio chip type (3:0) product type (4) and OEM revision (7:5) ++ UCHAR ucProductID; // Defines as OEM ID or ATI 
board ID dependent on product type setting ++ UCHAR ucMiscInfo1; // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) ++ UCHAR ucMiscInfo2; // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) ++ UCHAR ucMiscInfo3; // Video Decoder Type (3:0) Video In Standard/Crystal (7:4) ++ UCHAR ucMiscInfo4; // Video Decoder Host Config (2:0) reserved (7:3) ++ UCHAR ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++ UCHAR ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++ UCHAR ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++ UCHAR ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++ UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++}ATOM_MULTIMEDIA_CONFIG_INFO; ++ ++/****************************************************************************/ ++// Structures used in FirmwareInfoTable ++/****************************************************************************/ ++ ++// usBIOSCapability Defintion: ++// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; ++// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; ++// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; ++// Others: Reserved ++#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001 ++#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002 ++#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004 ++#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 ++#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 ++#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020 ++#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040 ++#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080 ++#define 
ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT 0x0100 ++#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00 ++#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000 ++#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000 ++ ++ ++#ifndef _H2INC ++ ++//Please don't add or expand this bitfield structure below, this one will retire soon.! ++typedef struct _ATOM_FIRMWARE_CAPABILITY ++{ ++#if ATOM_BIG_ENDIAN ++ USHORT Reserved:3; ++ USHORT HyperMemory_Size:4; ++ USHORT HyperMemory_Support:1; ++ USHORT PPMode_Assigned:1; ++ USHORT WMI_SUPPORT:1; ++ USHORT GPUControlsBL:1; ++ USHORT EngineClockSS_Support:1; ++ USHORT MemoryClockSS_Support:1; ++ USHORT ExtendedDesktopSupport:1; ++ USHORT DualCRTC_Support:1; ++ USHORT FirmwarePosted:1; ++#else ++ USHORT FirmwarePosted:1; ++ USHORT DualCRTC_Support:1; ++ USHORT ExtendedDesktopSupport:1; ++ USHORT MemoryClockSS_Support:1; ++ USHORT EngineClockSS_Support:1; ++ USHORT GPUControlsBL:1; ++ USHORT WMI_SUPPORT:1; ++ USHORT PPMode_Assigned:1; ++ USHORT HyperMemory_Support:1; ++ USHORT HyperMemory_Size:4; ++ USHORT Reserved:3; ++#endif ++}ATOM_FIRMWARE_CAPABILITY; ++ ++typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS ++{ ++ ATOM_FIRMWARE_CAPABILITY sbfAccess; ++ USHORT susAccess; ++}ATOM_FIRMWARE_CAPABILITY_ACCESS; ++ ++#else ++ ++typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS ++{ ++ USHORT susAccess; ++}ATOM_FIRMWARE_CAPABILITY_ACCESS; ++ ++#endif ++ ++typedef struct _ATOM_FIRMWARE_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulFirmwareRevision; ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++ ULONG ulDriverTargetEngineClock; //In 10Khz unit ++ ULONG ulDriverTargetMemoryClock; //In 10Khz unit ++ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit ++ ULONG ulASICMaxEngineClock; //In 10Khz unit ++ ULONG ulASICMaxMemoryClock; //In 10Khz unit ++ UCHAR ucASICMaxTemperature; ++ UCHAR 
ucPadding[3]; //Don't use them ++ ULONG aulReservedForBIOS[3]; //Don't use them ++ USHORT usMinEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Output; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit ++ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk ++ USHORT usMinPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMinPixelClockPLL_Output; //In 10Khz unit, the definitions above can't change!!! ++ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; ++ USHORT usReferenceClock; //In 10Khz unit ++ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit ++ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit ++ UCHAR ucDesign_ID; //Indicate what is the board design ++ UCHAR ucMemoryModule_ID; //Indicate what is the board design ++}ATOM_FIRMWARE_INFO; ++ ++typedef struct _ATOM_FIRMWARE_INFO_V1_2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulFirmwareRevision; ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++ ULONG ulDriverTargetEngineClock; //In 10Khz unit ++ ULONG ulDriverTargetMemoryClock; //In 10Khz unit ++ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit ++ ULONG ulASICMaxEngineClock; //In 10Khz unit ++ ULONG ulASICMaxMemoryClock; //In 10Khz unit ++ UCHAR ucASICMaxTemperature; ++ UCHAR ucMinAllowedBL_Level; ++ UCHAR ucPadding[2]; //Don't use them ++ ULONG aulReservedForBIOS[2]; //Don't use them ++ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Output; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Input; 
//In 10Khz unit ++ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit ++ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk ++ USHORT usMinPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output ++ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; ++ USHORT usReferenceClock; //In 10Khz unit ++ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit ++ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit ++ UCHAR ucDesign_ID; //Indicate what is the board design ++ UCHAR ucMemoryModule_ID; //Indicate what is the board design ++}ATOM_FIRMWARE_INFO_V1_2; ++ ++typedef struct _ATOM_FIRMWARE_INFO_V1_3 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulFirmwareRevision; ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++ ULONG ulDriverTargetEngineClock; //In 10Khz unit ++ ULONG ulDriverTargetMemoryClock; //In 10Khz unit ++ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit ++ ULONG ulASICMaxEngineClock; //In 10Khz unit ++ ULONG ulASICMaxMemoryClock; //In 10Khz unit ++ UCHAR ucASICMaxTemperature; ++ UCHAR ucMinAllowedBL_Level; ++ UCHAR ucPadding[2]; //Don't use them ++ ULONG aulReservedForBIOS; //Don't use them ++ ULONG ul3DAccelerationEngineClock;//In 10Khz unit ++ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Output; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit ++ USHORT usMaxPixelClock; //In 10Khz unit, Max. 
Pclk ++ USHORT usMinPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output ++ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; ++ USHORT usReferenceClock; //In 10Khz unit ++ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit ++ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit ++ UCHAR ucDesign_ID; //Indicate what is the board design ++ UCHAR ucMemoryModule_ID; //Indicate what is the board design ++}ATOM_FIRMWARE_INFO_V1_3; ++ ++typedef struct _ATOM_FIRMWARE_INFO_V1_4 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulFirmwareRevision; ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++ ULONG ulDriverTargetEngineClock; //In 10Khz unit ++ ULONG ulDriverTargetMemoryClock; //In 10Khz unit ++ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit ++ ULONG ulASICMaxEngineClock; //In 10Khz unit ++ ULONG ulASICMaxMemoryClock; //In 10Khz unit ++ UCHAR ucASICMaxTemperature; ++ UCHAR ucMinAllowedBL_Level; ++ USHORT usBootUpVDDCVoltage; //In MV unit ++ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit ++ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit ++ ULONG ul3DAccelerationEngineClock;//In 10Khz unit ++ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Output; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit ++ USHORT usMaxPixelClock; //In 10Khz unit, Max. 
Pclk ++ USHORT usMinPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output ++ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; ++ USHORT usReferenceClock; //In 10Khz unit ++ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit ++ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit ++ UCHAR ucDesign_ID; //Indicate what is the board design ++ UCHAR ucMemoryModule_ID; //Indicate what is the board design ++}ATOM_FIRMWARE_INFO_V1_4; ++ ++#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4 ++ ++/****************************************************************************/ ++// Structures used in IntegratedSystemInfoTable ++/****************************************************************************/ ++#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2 ++#define IGP_CAP_FLAG_AC_CARD 0x4 ++#define IGP_CAP_FLAG_SDVO_CARD 0x8 ++#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10 ++ ++typedef struct _ATOM_INTEGRATED_SYSTEM_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulBootUpEngineClock; //in 10kHz unit ++ ULONG ulBootUpMemoryClock; //in 10kHz unit ++ ULONG ulMaxSystemMemoryClock; //in 10kHz unit ++ ULONG ulMinSystemMemoryClock; //in 10kHz unit ++ UCHAR ucNumberOfCyclesInPeriodHi; ++ UCHAR ucLCDTimingSel; //=0:not valid.!=0 sel this timing descriptor from LCD EDID. 
++ USHORT usReserved1; ++ USHORT usInterNBVoltageLow; //An intermidiate PMW value to set the voltage ++ USHORT usInterNBVoltageHigh; //Another intermidiate PMW value to set the voltage ++ ULONG ulReserved[2]; ++ ++ USHORT usFSBClock; //In MHz unit ++ USHORT usCapabilityFlag; //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable ++ //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card ++ //Bit[4]==1: P/2 mode, ==0: P/1 mode ++ USHORT usPCIENBCfgReg7; //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal ++ USHORT usK8MemoryClock; //in MHz unit ++ USHORT usK8SyncStartDelay; //in 0.01 us unit ++ USHORT usK8DataReturnTime; //in 0.01 us unit ++ UCHAR ucMaxNBVoltage; ++ UCHAR ucMinNBVoltage; ++ UCHAR ucMemoryType; //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved ++ UCHAR ucNumberOfCyclesInPeriod; //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod ++ UCHAR ucStartingPWM_HighTime; //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime ++ UCHAR ucHTLinkWidth; //16 bit vs. 8 bit ++ UCHAR ucMaxNBVoltageHigh; ++ UCHAR ucMinNBVoltageHigh; ++}ATOM_INTEGRATED_SYSTEM_INFO; ++ ++/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO ++ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock ++ For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock ++ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 ++ For AMD IGP,for now this can be 0 ++ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 ++ For AMD IGP,for now this can be 0 ++ ++usFSBClock: For Intel IGP,it's FSB Freq ++ For AMD IGP,it's HT Link Speed ++ ++usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200 ++usK8SyncStartDelay: For AMD IGP only. Memory access latency in K8, required for watermark calculation ++usK8DataReturnTime: For AMD IGP only. 
Memory access latency in K8, required for watermark calculation ++ ++VC:Voltage Control ++ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. ++ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all. ++ ++ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value. ++ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0 ++ ++ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. ++ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all. ++ ++ ++usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all. ++usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the the voltage >=InterNBVoltageLow but <=Max NB voltage.Set this to 0x0000 if VC without PWM or no VC at all. ++*/ ++ ++ ++/* ++The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST; ++Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need. ++The enough reservation should allow us to never change table revisions. Whenever needed, a GPU SW component can use reserved portion for new data entries. 
++ ++SW components can access the IGP system infor structure in the same way as before ++*/ ++ ++ ++typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulBootUpEngineClock; //in 10kHz unit ++ ULONG ulReserved1[2]; //must be 0x0 for the reserved ++ ULONG ulBootUpUMAClock; //in 10kHz unit ++ ULONG ulBootUpSidePortClock; //in 10kHz unit ++ ULONG ulMinSidePortClock; //in 10kHz unit ++ ULONG ulReserved2[6]; //must be 0x0 for the reserved ++ ULONG ulSystemConfig; //see explanation below ++ ULONG ulBootUpReqDisplayVector; ++ ULONG ulOtherDisplayMisc; ++ ULONG ulDDISlot1Config; ++ ULONG ulDDISlot2Config; ++ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved ++ UCHAR ucUMAChannelNumber; ++ UCHAR ucDockingPinBit; ++ UCHAR ucDockingPinPolarity; ++ ULONG ulDockingPinCFGInfo; ++ ULONG ulCPUCapInfo; ++ USHORT usNumberOfCyclesInPeriod; ++ USHORT usMaxNBVoltage; ++ USHORT usMinNBVoltage; ++ USHORT usBootUpNBVoltage; ++ ULONG ulHTLinkFreq; //in 10Khz ++ USHORT usMinHTLinkWidth; ++ USHORT usMaxHTLinkWidth; ++ USHORT usUMASyncStartDelay; ++ USHORT usUMADataReturnTime; ++ USHORT usLinkStatusZeroTime; ++ USHORT usReserved; ++ ULONG ulHighVoltageHTLinkFreq; // in 10Khz ++ ULONG ulLowVoltageHTLinkFreq; // in 10Khz ++ USHORT usMaxUpStreamHTLinkWidth; ++ USHORT usMaxDownStreamHTLinkWidth; ++ USHORT usMinUpStreamHTLinkWidth; ++ USHORT usMinDownStreamHTLinkWidth; ++ ULONG ulReserved3[97]; //must be 0x0 ++}ATOM_INTEGRATED_SYSTEM_INFO_V2; ++ ++/* ++ulBootUpEngineClock: Boot-up Engine Clock in 10Khz; ++ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present ++ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock ++ ++ulSystemConfig: ++Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; ++Bit[1]=1: system boots up at AMD overdrived state or user customized mode. 
In this case, driver will just stick to this boot-up mode. No other PowerPlay state ++ =0: system boots up at driver control state. Power state depends on PowerPlay table. ++Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used. ++Bit[3]=1: Only one power state(Performance) will be supported. ++ =0: Multiple power states supported from PowerPlay table. ++Bit[4]=1: CLMC is supported and enabled on current system. ++ =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface. ++Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement. ++ =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied. ++Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored. ++ =0: Voltage settings is determined by powerplay table. ++Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue. ++ =0: Enable CLMC as regular mode, CDLD and CILR will be enabled. ++ ++ulBootUpReqDisplayVector: This dword is a bit vector indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions. ++ ++ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion; ++ [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition; ++ ++ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design). 
++ [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) ++ [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) ++ [15:8] - Lane configuration attribute; ++ [23:16]- Connector type, possible value: ++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D ++ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D ++ CONNECTOR_OBJECT_ID_HDMI_TYPE_A ++ CONNECTOR_OBJECT_ID_DISPLAYPORT ++ [31:24]- Reserved ++ ++ulDDISlot2Config: Same as Slot1. ++ucMemoryType: SidePort memory type, set it to 0x0 when Sideport memory is not installed. Driver needs this info to change sideport memory clock. Not for display in CCC. ++For IGP, Hypermemory is the only memory type showed in CCC. ++ ++ucUMAChannelNumber: how many channels for the UMA; ++ ++ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin ++ucDockingPinBit: which bit in this register to read the pin status; ++ucDockingPinPolarity:Polarity of the pin when docked; ++ ++ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 ++ ++usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. ++usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. ++usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode. ++ GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0 ++ PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1 ++ GPU SW don't control mode: usMaxNBVoltage & usMinNBVoltage=0 and no care about ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE ++usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value. 
++ ++ulHTLinkFreq: Bootup HT link Frequency in 10Khz. ++usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. ++ If CDLW enabled, both upstream and downstream width should be the same during bootup. ++usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth. ++ If CDLW enabled, both upstream and downstream width should be the same during bootup. ++ ++usUMASyncStartDelay: Memory access latency, required for watermark calculation ++usUMADataReturnTime: Memory access latency, required for watermark calculation ++usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us ++for Griffin or Greyhound. SBIOS needs to convert to actual time by: ++ if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us) ++ if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us) ++ if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us) ++ if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us) ++ ++ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot up runs in HT1, this must be 0. ++ This must be less than or equal to ulHTLinkFreq(bootup frequency). ++ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0. ++ This must be less than or equal to ulHighVoltageHTLinkFreq. ++ ++usMaxUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now. ++usMaxDownStreamHTLinkWidth: same as above. ++usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now. ++usMinDownStreamHTLinkWidth: same as above. 
++*/ ++ ++ ++#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 ++#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 ++#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004 ++#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008 ++#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010 ++#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020 ++#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040 ++#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080 ++ ++#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF ++ ++#define b0IGP_DDI_SLOT_LANE_MAP_MASK 0x0F ++#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK 0xF0 ++#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3 0x01 ++#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7 0x02 ++#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11 0x04 ++#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15 0x08 ++ ++#define IGP_DDI_SLOT_ATTRIBUTE_MASK 0x0000FF00 ++#define IGP_DDI_SLOT_CONFIG_REVERSED 0x00000100 ++#define b1IGP_DDI_SLOT_CONFIG_REVERSED 0x01 ++ ++#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000 ++ ++#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000 ++#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001 ++#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002 ++#define ATOM_DFP_INT_ENCODER1_INDEX 0x00000003 ++#define ATOM_CRT_INT_ENCODER2_INDEX 0x00000004 ++#define ATOM_LCD_EXT_ENCODER1_INDEX 0x00000005 ++#define ATOM_TV_EXT_ENCODER1_INDEX 0x00000006 ++#define ATOM_DFP_EXT_ENCODER1_INDEX 0x00000007 ++#define ATOM_CV_INT_ENCODER1_INDEX 0x00000008 ++#define ATOM_DFP_INT_ENCODER2_INDEX 0x00000009 ++#define ATOM_CRT_EXT_ENCODER1_INDEX 0x0000000A ++#define ATOM_CV_EXT_ENCODER1_INDEX 0x0000000B ++#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C ++#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D ++ ++// define ASIC internal encoder id ( bit vector ) ++#define ASIC_INT_DAC1_ENCODER_ID 0x00 ++#define ASIC_INT_TV_ENCODER_ID 0x02 ++#define ASIC_INT_DIG1_ENCODER_ID 0x03 ++#define ASIC_INT_DAC2_ENCODER_ID 0x04 ++#define ASIC_EXT_TV_ENCODER_ID 0x06 ++#define ASIC_INT_DVO_ENCODER_ID 0x07 ++#define 
ASIC_INT_DIG2_ENCODER_ID 0x09 ++#define ASIC_EXT_DIG_ENCODER_ID 0x05 ++ ++//define Encoder attribute ++#define ATOM_ANALOG_ENCODER 0 ++#define ATOM_DIGITAL_ENCODER 1 ++ ++#define ATOM_DEVICE_CRT1_INDEX 0x00000000 ++#define ATOM_DEVICE_LCD1_INDEX 0x00000001 ++#define ATOM_DEVICE_TV1_INDEX 0x00000002 ++#define ATOM_DEVICE_DFP1_INDEX 0x00000003 ++#define ATOM_DEVICE_CRT2_INDEX 0x00000004 ++#define ATOM_DEVICE_LCD2_INDEX 0x00000005 ++#define ATOM_DEVICE_TV2_INDEX 0x00000006 ++#define ATOM_DEVICE_DFP2_INDEX 0x00000007 ++#define ATOM_DEVICE_CV_INDEX 0x00000008 ++#define ATOM_DEVICE_DFP3_INDEX 0x00000009 ++#define ATOM_DEVICE_DFP4_INDEX 0x0000000A ++#define ATOM_DEVICE_DFP5_INDEX 0x0000000B ++#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C ++#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D ++#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E ++#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F ++#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1) ++#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO ++#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 ) ++ ++#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1) ++ ++#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX ) ++#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX ) ++#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX ) ++#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX) ++#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX ) ++#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX ) ++#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX ) ++#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX) ++#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX ) ++#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX ) ++#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX ) ++#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX ) ++ 
++#define ATOM_DEVICE_CRT_SUPPORT ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT ++#define ATOM_DEVICE_DFP_SUPPORT ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT ++#define ATOM_DEVICE_TV_SUPPORT ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT ++#define ATOM_DEVICE_LCD_SUPPORT ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT ++ ++#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0 ++#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004 ++#define ATOM_DEVICE_CONNECTOR_VGA 0x00000001 ++#define ATOM_DEVICE_CONNECTOR_DVI_I 0x00000002 ++#define ATOM_DEVICE_CONNECTOR_DVI_D 0x00000003 ++#define ATOM_DEVICE_CONNECTOR_DVI_A 0x00000004 ++#define ATOM_DEVICE_CONNECTOR_SVIDEO 0x00000005 ++#define ATOM_DEVICE_CONNECTOR_COMPOSITE 0x00000006 ++#define ATOM_DEVICE_CONNECTOR_LVDS 0x00000007 ++#define ATOM_DEVICE_CONNECTOR_DIGI_LINK 0x00000008 ++#define ATOM_DEVICE_CONNECTOR_SCART 0x00000009 ++#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A 0x0000000A ++#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B 0x0000000B ++#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E ++#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F ++ ++ ++#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F ++#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000 ++#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000 ++#define ATOM_DEVICE_DAC_INFO_DACA 0x00000001 ++#define ATOM_DEVICE_DAC_INFO_DACB 0x00000002 ++#define ATOM_DEVICE_DAC_INFO_EXDAC 0x00000003 ++ ++#define ATOM_DEVICE_I2C_ID_NOI2C 0x00000000 ++ ++#define ATOM_DEVICE_I2C_LINEMUX_MASK 0x0000000F ++#define ATOM_DEVICE_I2C_LINEMUX_SHIFT 0x00000000 ++ ++#define ATOM_DEVICE_I2C_ID_MASK 0x00000070 ++#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004 ++#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001 ++#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002 ++#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600 ++#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690 ++ ++#define 
ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080 ++#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007 ++#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000 ++#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001 ++ ++// usDeviceSupport: ++// Bits0 = 0 - no CRT1 support= 1- CRT1 is supported ++// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported ++// Bit 2 = 0 - no TV1 support= 1- TV1 is supported ++// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported ++// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported ++// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported ++// Bit 6 = 0 - no TV2 support= 1- TV2 is supported ++// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported ++// Bit 8 = 0 - no CV support= 1- CV is supported ++// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported ++// Byte1 (Supported Device Info) ++// Bit 0 = = 0 - no CV support= 1- CV is supported ++// ++// ++ ++// ucI2C_ConfigID ++// [7:0] - I2C LINE Associate ID ++// = 0 - no I2C ++// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) ++// = 0, [6:0]=SW assisted I2C ID ++// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use ++// = 2, HW engine for Multimedia use ++// = 3-7 Reserved for future I2C engines ++// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C ++ ++typedef struct _ATOM_I2C_ID_CONFIG ++{ ++#if ATOM_BIG_ENDIAN ++ UCHAR bfHW_Capable:1; ++ UCHAR bfHW_EngineID:3; ++ UCHAR bfI2C_LineMux:4; ++#else ++ UCHAR bfI2C_LineMux:4; ++ UCHAR bfHW_EngineID:3; ++ UCHAR bfHW_Capable:1; ++#endif ++}ATOM_I2C_ID_CONFIG; ++ ++typedef union _ATOM_I2C_ID_CONFIG_ACCESS ++{ ++ ATOM_I2C_ID_CONFIG sbfAccess; ++ UCHAR ucAccess; ++}ATOM_I2C_ID_CONFIG_ACCESS; ++ ++ ++/****************************************************************************/ ++// Structure used in GPIO_I2C_InfoTable ++/****************************************************************************/ ++typedef struct _ATOM_GPIO_I2C_ASSIGMENT ++{ ++ USHORT usClkMaskRegisterIndex; ++ USHORT 
usClkEnRegisterIndex; ++ USHORT usClkY_RegisterIndex; ++ USHORT usClkA_RegisterIndex; ++ USHORT usDataMaskRegisterIndex; ++ USHORT usDataEnRegisterIndex; ++ USHORT usDataY_RegisterIndex; ++ USHORT usDataA_RegisterIndex; ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; ++ UCHAR ucClkMaskShift; ++ UCHAR ucClkEnShift; ++ UCHAR ucClkY_Shift; ++ UCHAR ucClkA_Shift; ++ UCHAR ucDataMaskShift; ++ UCHAR ucDataEnShift; ++ UCHAR ucDataY_Shift; ++ UCHAR ucDataA_Shift; ++ UCHAR ucReserved1; ++ UCHAR ucReserved2; ++}ATOM_GPIO_I2C_ASSIGMENT; ++ ++typedef struct _ATOM_GPIO_I2C_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE]; ++}ATOM_GPIO_I2C_INFO; ++ ++/****************************************************************************/ ++// Common Structure used in other structures ++/****************************************************************************/ ++ ++#ifndef _H2INC ++ ++//Please don't add or expand this bitfield structure below, this one will retire soon.! 
++typedef struct _ATOM_MODE_MISC_INFO ++{ ++#if ATOM_BIG_ENDIAN ++ USHORT Reserved:6; ++ USHORT RGB888:1; ++ USHORT DoubleClock:1; ++ USHORT Interlace:1; ++ USHORT CompositeSync:1; ++ USHORT V_ReplicationBy2:1; ++ USHORT H_ReplicationBy2:1; ++ USHORT VerticalCutOff:1; ++ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low ++ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low ++ USHORT HorizontalCutOff:1; ++#else ++ USHORT HorizontalCutOff:1; ++ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low ++ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low ++ USHORT VerticalCutOff:1; ++ USHORT H_ReplicationBy2:1; ++ USHORT V_ReplicationBy2:1; ++ USHORT CompositeSync:1; ++ USHORT Interlace:1; ++ USHORT DoubleClock:1; ++ USHORT RGB888:1; ++ USHORT Reserved:6; ++#endif ++}ATOM_MODE_MISC_INFO; ++ ++typedef union _ATOM_MODE_MISC_INFO_ACCESS ++{ ++ ATOM_MODE_MISC_INFO sbfAccess; ++ USHORT usAccess; ++}ATOM_MODE_MISC_INFO_ACCESS; ++ ++#else ++ ++typedef union _ATOM_MODE_MISC_INFO_ACCESS ++{ ++ USHORT usAccess; ++}ATOM_MODE_MISC_INFO_ACCESS; ++ ++#endif ++ ++// usModeMiscInfo- ++#define ATOM_H_CUTOFF 0x01 ++#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low ++#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low ++#define ATOM_V_CUTOFF 0x08 ++#define ATOM_H_REPLICATIONBY2 0x10 ++#define ATOM_V_REPLICATIONBY2 0x20 ++#define ATOM_COMPOSITESYNC 0x40 ++#define ATOM_INTERLACE 0x80 ++#define ATOM_DOUBLE_CLOCK_MODE 0x100 ++#define ATOM_RGB888_MODE 0x200 ++ ++//usRefreshRate- ++#define ATOM_REFRESH_43 43 ++#define ATOM_REFRESH_47 47 ++#define ATOM_REFRESH_56 56 ++#define ATOM_REFRESH_60 60 ++#define ATOM_REFRESH_65 65 ++#define ATOM_REFRESH_70 70 ++#define ATOM_REFRESH_72 72 ++#define ATOM_REFRESH_75 75 ++#define ATOM_REFRESH_85 85 ++ ++// ATOM_MODE_TIMING data are exactly the same as VESA timing data. ++// Translation from EDID to ATOM_MODE_TIMING, use the following formula. 
++// ++// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK ++// = EDID_HA + EDID_HBL ++// VESA_HDISP = VESA_ACTIVE = EDID_HA ++// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH ++// = EDID_HA + EDID_HSO ++// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW ++// VESA_BORDER = EDID_BORDER ++ ++/****************************************************************************/ ++// Structure used in SetCRTC_UsingDTDTimingTable ++/****************************************************************************/ ++typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS ++{ ++ USHORT usH_Size; ++ USHORT usH_Blanking_Time; ++ USHORT usV_Size; ++ USHORT usV_Blanking_Time; ++ USHORT usH_SyncOffset; ++ USHORT usH_SyncWidth; ++ USHORT usV_SyncOffset; ++ USHORT usV_SyncWidth; ++ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; ++ UCHAR ucH_Border; // From DFP EDID ++ UCHAR ucV_Border; ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucPadding[3]; ++}SET_CRTC_USING_DTD_TIMING_PARAMETERS; ++ ++/****************************************************************************/ ++// Structure used in SetCRTC_TimingTable ++/****************************************************************************/ ++typedef struct _SET_CRTC_TIMING_PARAMETERS ++{ ++ USHORT usH_Total; // horizontal total ++ USHORT usH_Disp; // horizontal display ++ USHORT usH_SyncStart; // horozontal Sync start ++ USHORT usH_SyncWidth; // horizontal Sync width ++ USHORT usV_Total; // vertical total ++ USHORT usV_Disp; // vertical display ++ USHORT usV_SyncStart; // vertical Sync start ++ USHORT usV_SyncWidth; // vertical Sync width ++ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucOverscanRight; // right ++ UCHAR ucOverscanLeft; // left ++ UCHAR ucOverscanBottom; // bottom ++ UCHAR ucOverscanTop; // top ++ UCHAR ucReserved; ++}SET_CRTC_TIMING_PARAMETERS; ++#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS ++ 
++/****************************************************************************/ ++// Structure used in StandardVESA_TimingTable ++// AnalogTV_InfoTable ++// ComponentVideoInfoTable ++/****************************************************************************/ ++typedef struct _ATOM_MODE_TIMING ++{ ++ USHORT usCRTC_H_Total; ++ USHORT usCRTC_H_Disp; ++ USHORT usCRTC_H_SyncStart; ++ USHORT usCRTC_H_SyncWidth; ++ USHORT usCRTC_V_Total; ++ USHORT usCRTC_V_Disp; ++ USHORT usCRTC_V_SyncStart; ++ USHORT usCRTC_V_SyncWidth; ++ USHORT usPixelClock; //in 10Khz unit ++ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; ++ USHORT usCRTC_OverscanRight; ++ USHORT usCRTC_OverscanLeft; ++ USHORT usCRTC_OverscanBottom; ++ USHORT usCRTC_OverscanTop; ++ USHORT usReserve; ++ UCHAR ucInternalModeNumber; ++ UCHAR ucRefreshRate; ++}ATOM_MODE_TIMING; ++ ++typedef struct _ATOM_DTD_FORMAT ++{ ++ USHORT usPixClk; ++ USHORT usHActive; ++ USHORT usHBlanking_Time; ++ USHORT usVActive; ++ USHORT usVBlanking_Time; ++ USHORT usHSyncOffset; ++ USHORT usHSyncWidth; ++ USHORT usVSyncOffset; ++ USHORT usVSyncWidth; ++ USHORT usImageHSize; ++ USHORT usImageVSize; ++ UCHAR ucHBorder; ++ UCHAR ucVBorder; ++ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; ++ UCHAR ucInternalModeNumber; ++ UCHAR ucRefreshRate; ++}ATOM_DTD_FORMAT; ++ ++/****************************************************************************/ ++// Structure used in LVDS_InfoTable ++// * Need a document to describe this table ++/****************************************************************************/ ++#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 ++#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 ++#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 ++#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 ++ ++//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. 
++//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL ++#define LCDPANEL_CAP_READ_EDID 0x1 ++ ++//ucTableFormatRevision=1 ++//ucTableContentRevision=1 ++typedef struct _ATOM_LVDS_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_DTD_FORMAT sLCDTiming; ++ USHORT usModePatchTableOffset; ++ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. ++ USHORT usOffDelayInMs; ++ UCHAR ucPowerSequenceDigOntoDEin10Ms; ++ UCHAR ucPowerSequenceDEtoBLOnin10Ms; ++ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} ++ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} ++ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} ++ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} ++ UCHAR ucPanelDefaultRefreshRate; ++ UCHAR ucPanelIdentification; ++ UCHAR ucSS_Id; ++}ATOM_LVDS_INFO; ++ ++//ucTableFormatRevision=1 ++//ucTableContentRevision=2 ++typedef struct _ATOM_LVDS_INFO_V12 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_DTD_FORMAT sLCDTiming; ++ USHORT usExtInfoTableOffset; ++ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. 
++ USHORT usOffDelayInMs; ++ UCHAR ucPowerSequenceDigOntoDEin10Ms; ++ UCHAR ucPowerSequenceDEtoBLOnin10Ms; ++ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} ++ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} ++ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} ++ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} ++ UCHAR ucPanelDefaultRefreshRate; ++ UCHAR ucPanelIdentification; ++ UCHAR ucSS_Id; ++ USHORT usLCDVenderID; ++ USHORT usLCDProductID; ++ UCHAR ucLCDPanel_SpecialHandlingCap; ++ UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable ++ UCHAR ucReserved[2]; ++}ATOM_LVDS_INFO_V12; ++ ++#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 ++ ++typedef struct _ATOM_PATCH_RECORD_MODE ++{ ++ UCHAR ucRecordType; ++ USHORT usHDisp; ++ USHORT usVDisp; ++}ATOM_PATCH_RECORD_MODE; ++ ++typedef struct _ATOM_LCD_RTS_RECORD ++{ ++ UCHAR ucRecordType; ++ UCHAR ucRTSValue; ++}ATOM_LCD_RTS_RECORD; ++ ++//!! If the record below exits, it shoud always be the first record for easy use in command table!!! ++typedef struct _ATOM_LCD_MODE_CONTROL_CAP ++{ ++ UCHAR ucRecordType; ++ USHORT usLCDCap; ++}ATOM_LCD_MODE_CONTROL_CAP; ++ ++#define LCD_MODE_CAP_BL_OFF 1 ++#define LCD_MODE_CAP_CRTC_OFF 2 ++#define LCD_MODE_CAP_PANEL_OFF 4 ++ ++typedef struct _ATOM_FAKE_EDID_PATCH_RECORD ++{ ++ UCHAR ucRecordType; ++ UCHAR ucFakeEDIDLength; ++ UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements. 
++} ATOM_FAKE_EDID_PATCH_RECORD; ++ ++typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD ++{ ++ UCHAR ucRecordType; ++ USHORT usHSize; ++ USHORT usVSize; ++}ATOM_PANEL_RESOLUTION_PATCH_RECORD; ++ ++#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1 ++#define LCD_RTS_RECORD_TYPE 2 ++#define LCD_CAP_RECORD_TYPE 3 ++#define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4 ++#define LCD_PANEL_RESOLUTION_RECORD_TYPE 5 ++#define ATOM_RECORD_END_TYPE 0xFF ++ ++/****************************Spread Spectrum Info Table Definitions **********************/ ++ ++//ucTableFormatRevision=1 ++//ucTableContentRevision=2 ++typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT ++{ ++ USHORT usSpreadSpectrumPercentage; ++ UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD ++ UCHAR ucSS_Step; ++ UCHAR ucSS_Delay; ++ UCHAR ucSS_Id; ++ UCHAR ucRecommandedRef_Div; ++ UCHAR ucSS_Range; //it was reserved for V11 ++}ATOM_SPREAD_SPECTRUM_ASSIGNMENT; ++ ++#define ATOM_MAX_SS_ENTRY 16 ++#define ATOM_DP_SS_ID1 0x0f1 // SS modulation freq=30k ++#define ATOM_DP_SS_ID2 0x0f2 // SS modulation freq=33k ++ ++ ++#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 ++#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 ++#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001 ++#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001 ++#define ATOM_INTERNAL_SS_MASK 0x00000000 ++#define ATOM_EXTERNAL_SS_MASK 0x00000002 ++#define EXEC_SS_STEP_SIZE_SHIFT 2 ++#define EXEC_SS_DELAY_SHIFT 4 ++#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4 ++ ++typedef struct _ATOM_SPREAD_SPECTRUM_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY]; ++}ATOM_SPREAD_SPECTRUM_INFO; ++ ++/****************************************************************************/ ++// Structure used in AnalogTV_InfoTable (Top level) ++/****************************************************************************/ ++//ucTVBootUpDefaultStd definiton: ++ ++//ATOM_TV_NTSC 1 ++//ATOM_TV_NTSCJ 2 ++//ATOM_TV_PAL 3 
++//ATOM_TV_PALM 4 ++//ATOM_TV_PALCN 5 ++//ATOM_TV_PALN 6 ++//ATOM_TV_PAL60 7 ++//ATOM_TV_SECAM 8 ++ ++//ucTVSuppportedStd definition: ++#define NTSC_SUPPORT 0x1 ++#define NTSCJ_SUPPORT 0x2 ++ ++#define PAL_SUPPORT 0x4 ++#define PALM_SUPPORT 0x8 ++#define PALCN_SUPPORT 0x10 ++#define PALN_SUPPORT 0x20 ++#define PAL60_SUPPORT 0x40 ++#define SECAM_SUPPORT 0x80 ++ ++#define MAX_SUPPORTED_TV_TIMING 2 ++ ++typedef struct _ATOM_ANALOG_TV_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucTV_SupportedStandard; ++ UCHAR ucTV_BootUpDefaultStandard; ++ UCHAR ucExt_TV_ASIC_ID; ++ UCHAR ucExt_TV_ASIC_SlaveAddr; ++ /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/ ++ ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING]; ++}ATOM_ANALOG_TV_INFO; ++ ++ ++/**************************************************************************/ ++// VRAM usage and their defintions ++ ++// One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. ++// Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. ++// All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! ++// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR ++// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX ++ ++#ifndef VESA_MEMORY_IN_64K_BLOCK ++#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!) 
++#endif ++ ++#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes ++#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes ++#define ATOM_HWICON_INFOTABLE_SIZE 32 ++#define MAX_DTD_MODE_IN_VRAM 6 ++#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) ++#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) ++#define DFP_ENCODER_TYPE_OFFSET 0x80 ++#define DP_ENCODER_LANE_NUM_OFFSET 0x84 ++#define DP_ENCODER_LINK_RATE_OFFSET 0x88 ++ ++#define ATOM_HWICON1_SURFACE_ADDR 0 ++#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) ++#define ATOM_HWICON_INFOTABLE_ADDR (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) ++#define ATOM_CRT1_EDID_ADDR (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE) ++#define ATOM_CRT1_DTD_MODE_TBL_ADDR (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_CRT1_STD_MODE_TBL_ADDR (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_DFP1_EDID_ADDR (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_DFP1_DTD_MODE_TBL_ADDR (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_DFP1_STD_MODE_TBL_ADDR (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_CRT2_EDID_ADDR (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_CRT2_DTD_MODE_TBL_ADDR (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_CRT2_STD_MODE_TBL_ADDR (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_LCD2_EDID_ADDR 
(ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_CV_EDID_ADDR (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_CV_DTD_MODE_TBL_ADDR (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_CV_STD_MODE_TBL_ADDR (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_DFP3_EDID_ADDR (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_DFP3_DTD_MODE_TBL_ADDR (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_DFP3_STD_MODE_TBL_ADDR (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_DFP4_EDID_ADDR (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_DFP4_DTD_MODE_TBL_ADDR (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_DFP4_STD_MODE_TBL_ADDR (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_DFP5_EDID_ADDR (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_DP_TRAINING_TBL_ADDR 
(ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++ ++#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256) ++#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512 ++ ++//The size below is in Kb! ++#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) ++ ++#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L ++#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 ++#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 ++#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0 ++ ++/***********************************************************************************/ ++// Structure used in VRAM_UsageByFirmwareTable ++// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm ++// at running time. ++// note2: From RV770, the memory is more than 32bit addressable, so we will change ++// ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains ++// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware ++// (in offset to start of memory address) is KB aligned instead of byte aligend. 
++/***********************************************************************************/ ++#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1 ++ ++typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO ++{ ++ ULONG ulStartAddrUsedByFirmware; ++ USHORT usFirmwareUseInKb; ++ USHORT usReserved; ++}ATOM_FIRMWARE_VRAM_RESERVE_INFO; ++ ++typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; ++}ATOM_VRAM_USAGE_BY_FIRMWARE; ++ ++/****************************************************************************/ ++// Structure used in GPIO_Pin_LUTTable ++/****************************************************************************/ ++typedef struct _ATOM_GPIO_PIN_ASSIGNMENT ++{ ++ USHORT usGpioPin_AIndex; ++ UCHAR ucGpioPinBitShift; ++ UCHAR ucGPIO_ID; ++}ATOM_GPIO_PIN_ASSIGNMENT; ++ ++typedef struct _ATOM_GPIO_PIN_LUT ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; ++}ATOM_GPIO_PIN_LUT; ++ ++/****************************************************************************/ ++// Structure used in ComponentVideoInfoTable ++/****************************************************************************/ ++#define GPIO_PIN_ACTIVE_HIGH 0x1 ++ ++#define MAX_SUPPORTED_CV_STANDARDS 5 ++ ++// definitions for ATOM_D_INFO.ucSettings ++#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0] ++#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out ++#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7] ++ ++typedef struct _ATOM_GPIO_INFO ++{ ++ USHORT usAOffset; ++ UCHAR ucSettings; ++ UCHAR ucReserved; ++}ATOM_GPIO_INFO; ++ ++// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) ++#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2 ++ ++// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i ++#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7]; ++#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0] ++ ++// 
definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode ++//Line 3 out put 5V. ++#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9 ++#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9 ++#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 ++ ++//Line 3 out put 2.2V ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 ++ ++//Line 3 out put 0V ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3 ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3 ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 ++ ++#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0] ++ ++#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7 ++ ++//GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. ++#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. ++#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. ++ ++ ++typedef struct _ATOM_COMPONENT_VIDEO_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usMask_PinRegisterIndex; ++ USHORT usEN_PinRegisterIndex; ++ USHORT usY_PinRegisterIndex; ++ USHORT usA_PinRegisterIndex; ++ UCHAR ucBitShift; ++ UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low ++ ATOM_DTD_FORMAT sReserved; // must be zeroed out ++ UCHAR ucMiscInfo; ++ UCHAR uc480i; ++ UCHAR uc480p; ++ UCHAR uc720p; ++ UCHAR uc1080i; ++ UCHAR ucLetterBoxMode; ++ UCHAR ucReserved[3]; ++ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. 
If zere, NTSC type connector ++ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; ++ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; ++}ATOM_COMPONENT_VIDEO_INFO; ++ ++//ucTableFormatRevision=2 ++//ucTableContentRevision=1 ++typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucMiscInfo; ++ UCHAR uc480i; ++ UCHAR uc480p; ++ UCHAR uc720p; ++ UCHAR uc1080i; ++ UCHAR ucReserved; ++ UCHAR ucLetterBoxMode; ++ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zere, NTSC type connector ++ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; ++ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; ++}ATOM_COMPONENT_VIDEO_INFO_V21; ++ ++#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21 ++ ++/****************************************************************************/ ++// Structure used in object_InfoTable ++/****************************************************************************/ ++typedef struct _ATOM_OBJECT_HEADER ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usDeviceSupport; ++ USHORT usConnectorObjectTableOffset; ++ USHORT usRouterObjectTableOffset; ++ USHORT usEncoderObjectTableOffset; ++ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent. ++ USHORT usDisplayPathTableOffset; ++}ATOM_OBJECT_HEADER; ++ ++ ++typedef struct _ATOM_DISPLAY_OBJECT_PATH ++{ ++ USHORT usDeviceTag; //supported device ++ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH ++ USHORT usConnObjectId; //Connector Object ID ++ USHORT usGPUObjectId; //GPU ID ++ USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 
++}ATOM_DISPLAY_OBJECT_PATH; ++ ++typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE ++{ ++ UCHAR ucNumOfDispPath; ++ UCHAR ucVersion; ++ UCHAR ucPadding[2]; ++ ATOM_DISPLAY_OBJECT_PATH asDispPath[1]; ++}ATOM_DISPLAY_OBJECT_PATH_TABLE; ++ ++ ++typedef struct _ATOM_OBJECT //each object has this structure ++{ ++ USHORT usObjectID; ++ USHORT usSrcDstTableOffset; ++ USHORT usRecordOffset; //this pointing to a bunch of records defined below ++ USHORT usReserved; ++}ATOM_OBJECT; ++ ++typedef struct _ATOM_OBJECT_TABLE //Above 4 object table offset pointing to a bunch of objects all have this structure ++{ ++ UCHAR ucNumberOfObjects; ++ UCHAR ucPadding[3]; ++ ATOM_OBJECT asObjects[1]; ++}ATOM_OBJECT_TABLE; ++ ++typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure ++{ ++ UCHAR ucNumberOfSrc; ++ USHORT usSrcObjectID[1]; ++ UCHAR ucNumberOfDst; ++ USHORT usDstObjectID[1]; ++}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; ++ ++ ++//Related definitions, all records are differnt but they have a commond header ++typedef struct _ATOM_COMMON_RECORD_HEADER ++{ ++ UCHAR ucRecordType; //An emun to indicate the record type ++ UCHAR ucRecordSize; //The size of the whole record in byte ++}ATOM_COMMON_RECORD_HEADER; ++ ++ ++#define ATOM_I2C_RECORD_TYPE 1 ++#define ATOM_HPD_INT_RECORD_TYPE 2 ++#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3 ++#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4 ++#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE ++#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE ++#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7 ++#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE ++#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9 ++#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10 ++#define ATOM_CONNECTOR_CF_RECORD_TYPE 11 ++#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12 ++#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13 
++#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14 ++#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15 ++ ++//Must be updated when new record type is added,equal to that record definition! ++#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE ++ ++typedef struct _ATOM_I2C_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ ATOM_I2C_ID_CONFIG sucI2cId; ++ UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC ++}ATOM_I2C_RECORD; ++ ++typedef struct _ATOM_HPD_INT_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info ++ UCHAR ucPluggged_PinState; ++}ATOM_HPD_INT_RECORD; ++ ++ ++typedef struct _ATOM_OUTPUT_PROTECTION_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucProtectionFlag; ++ UCHAR ucReserved; ++}ATOM_OUTPUT_PROTECTION_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_DEVICE_TAG ++{ ++ ULONG ulACPIDeviceEnum; //Reserved for now ++ USHORT usDeviceID; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT" ++ USHORT usPadding; ++}ATOM_CONNECTOR_DEVICE_TAG; ++ ++typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucNumberOfDevice; ++ UCHAR ucReserved; ++ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation ++}ATOM_CONNECTOR_DEVICE_TAG_RECORD; ++ ++ ++typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucConfigGPIOID; ++ UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in ++ UCHAR ucFlowinGPIPID; ++ UCHAR ucExtInGPIPID; ++}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD; ++ ++typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucCTL1GPIO_ID; ++ UCHAR ucCTL1GPIOState; //Set to 1 when it's active high ++ UCHAR ucCTL2GPIO_ID; ++ UCHAR ucCTL2GPIOState; //Set to 1 when it's active high ++ UCHAR 
ucCTL3GPIO_ID; ++ UCHAR ucCTL3GPIOState; //Set to 1 when it's active high ++ UCHAR ucCTLFPGA_IN_ID; ++ UCHAR ucPadding[3]; ++}ATOM_ENCODER_FPGA_CONTROL_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info ++ UCHAR ucTVActiveState; //Indicating when the pin==0 or 1 when TV is connected ++}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD; ++ ++typedef struct _ATOM_JTAG_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucTMSGPIO_ID; ++ UCHAR ucTMSGPIOState; //Set to 1 when it's active high ++ UCHAR ucTCKGPIO_ID; ++ UCHAR ucTCKGPIOState; //Set to 1 when it's active high ++ UCHAR ucTDOGPIO_ID; ++ UCHAR ucTDOGPIOState; //Set to 1 when it's active high ++ UCHAR ucTDIGPIO_ID; ++ UCHAR ucTDIGPIOState; //Set to 1 when it's active high ++ UCHAR ucPadding[2]; ++}ATOM_JTAG_RECORD; ++ ++ ++//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually ++typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR ++{ ++ UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table ++ UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin ++}ATOM_GPIO_PIN_CONTROL_PAIR; ++ ++typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucFlags; // Future expnadibility ++ UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object ++ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins ++}ATOM_OBJECT_GPIO_CNTL_RECORD; ++ ++//Definitions for GPIO pin state ++#define GPIO_PIN_TYPE_INPUT 0x00 ++#define GPIO_PIN_TYPE_OUTPUT 0x10 ++#define GPIO_PIN_TYPE_HW_CONTROL 0x20 ++ ++//For GPIO_PIN_TYPE_OUTPUT the following is defined ++#define GPIO_PIN_OUTPUT_STATE_MASK 0x01 ++#define GPIO_PIN_OUTPUT_STATE_SHIFT 0 ++#define GPIO_PIN_STATE_ACTIVE_LOW 0x0 ++#define 
GPIO_PIN_STATE_ACTIVE_HIGH 0x1 ++ ++typedef struct _ATOM_ENCODER_DVO_CF_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ ULONG ulStrengthControl; // DVOA strength control for CF ++ UCHAR ucPadding[2]; ++}ATOM_ENCODER_DVO_CF_RECORD; ++ ++// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle ++#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 ++#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 ++ ++typedef struct _ATOM_CONNECTOR_CF_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ USHORT usMaxPixClk; ++ UCHAR ucFlowCntlGpioId; ++ UCHAR ucSwapCntlGpioId; ++ UCHAR ucConnectedDvoBundle; ++ UCHAR ucPadding; ++}ATOM_CONNECTOR_CF_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ ATOM_DTD_FORMAT asTiming; ++}ATOM_CONNECTOR_HARDCODE_DTD_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE ++ UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A ++ UCHAR ucReserved; ++}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD; ++ ++ ++typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucMuxType; //decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state ++ UCHAR ucMuxControlPin; ++ UCHAR ucMuxState[2]; //for alligment purpose ++}ATOM_ROUTER_DDC_PATH_SELECT_RECORD; ++ ++typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucMuxType; ++ UCHAR ucMuxControlPin; ++ UCHAR ucMuxState[2]; //for alligment purpose ++}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD; ++ ++// define ucMuxType ++#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f ++#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01 ++ ++/****************************************************************************/ ++// ASIC voltage data table 
++/****************************************************************************/ ++typedef struct _ATOM_VOLTAGE_INFO_HEADER ++{ ++ USHORT usVDDCBaseLevel; //In number of 50mv unit ++ USHORT usReserved; //For possible extension table offset ++ UCHAR ucNumOfVoltageEntries; ++ UCHAR ucBytesPerVoltageEntry; ++ UCHAR ucVoltageStep; //Indicating in how many mv increament is one step, 0.5mv unit ++ UCHAR ucDefaultVoltageEntry; ++ UCHAR ucVoltageControlI2cLine; ++ UCHAR ucVoltageControlAddress; ++ UCHAR ucVoltageControlOffset; ++}ATOM_VOLTAGE_INFO_HEADER; ++ ++typedef struct _ATOM_VOLTAGE_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_VOLTAGE_INFO_HEADER viHeader; ++ UCHAR ucVoltageEntries[64]; //64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry ++}ATOM_VOLTAGE_INFO; ++ ++ ++typedef struct _ATOM_VOLTAGE_FORMULA ++{ ++ USHORT usVoltageBaseLevel; // In number of 1mv unit ++ USHORT usVoltageStep; // Indicating in how many mv increament is one step, 1mv unit ++ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage ++ UCHAR ucFlag; // bit0=0 :step is 1mv =1 0.5mv ++ UCHAR ucBaseVID; // if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep ++ UCHAR ucReserved; ++ UCHAR ucVIDAdjustEntries[32]; // 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries ++}ATOM_VOLTAGE_FORMULA; ++ ++typedef struct _ATOM_VOLTAGE_CONTROL ++{ ++ UCHAR ucVoltageControlId; //Indicate it is controlled by I2C or GPIO or HW state machine ++ UCHAR ucVoltageControlI2cLine; ++ UCHAR ucVoltageControlAddress; ++ UCHAR ucVoltageControlOffset; ++ USHORT usGpioPin_AIndex; //GPIO_PAD register index ++ UCHAR ucGpioPinBitShift[9]; //at most 8 pin support 255 VIDs, termintate with 0xff ++ UCHAR ucReserved; ++}ATOM_VOLTAGE_CONTROL; ++ ++// Define ucVoltageControlId ++#define VOLTAGE_CONTROLLED_BY_HW 0x00 ++#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F ++#define 
VOLTAGE_CONTROLLED_BY_GPIO 0x80 ++#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage ++#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI ++#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage ++#define VOLTAGE_CONTROL_ID_DS4402 0x04 ++ ++typedef struct _ATOM_VOLTAGE_OBJECT ++{ ++ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI ++ UCHAR ucSize; //Size of Object ++ ATOM_VOLTAGE_CONTROL asControl; //describ how to control ++ ATOM_VOLTAGE_FORMULA asFormula; //Indicate How to convert real Voltage to VID ++}ATOM_VOLTAGE_OBJECT; ++ ++typedef struct _ATOM_VOLTAGE_OBJECT_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control ++}ATOM_VOLTAGE_OBJECT_INFO; ++ ++typedef struct _ATOM_LEAKID_VOLTAGE ++{ ++ UCHAR ucLeakageId; ++ UCHAR ucReserved; ++ USHORT usVoltage; ++}ATOM_LEAKID_VOLTAGE; ++ ++typedef struct _ATOM_ASIC_PROFILE_VOLTAGE ++{ ++ UCHAR ucProfileId; ++ UCHAR ucReserved; ++ USHORT usSize; ++ USHORT usEfuseSpareStartAddr; ++ USHORT usFuseIndex[8]; //from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, ++ ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakid and relatd voltage ++}ATOM_ASIC_PROFILE_VOLTAGE; ++ ++//ucProfileId ++#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1 ++#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1 ++#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2 ++ ++typedef struct _ATOM_ASIC_PROFILING_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER asHeader; ++ ATOM_ASIC_PROFILE_VOLTAGE asVoltage; ++}ATOM_ASIC_PROFILING_INFO; ++ ++typedef struct _ATOM_POWER_SOURCE_OBJECT ++{ ++ UCHAR ucPwrSrcId; // Power source ++ UCHAR ucPwrSensorType; // GPIO, I2C or none ++ UCHAR ucPwrSensId; // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id ++ UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect ++ UCHAR ucPwrSensRegIndex; // I2C register Index if I2C detect ++ UCHAR 
ucPwrSensRegBitMask; // detect which bit is used if I2C detect ++ UCHAR ucPwrSensActiveState; // high active or low active ++ UCHAR ucReserve[3]; // reserve ++ USHORT usSensPwr; // in unit of watt ++}ATOM_POWER_SOURCE_OBJECT; ++ ++typedef struct _ATOM_POWER_SOURCE_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER asHeader; ++ UCHAR asPwrbehave[16]; ++ ATOM_POWER_SOURCE_OBJECT asPwrObj[1]; ++}ATOM_POWER_SOURCE_INFO; ++ ++ ++//Define ucPwrSrcId ++#define POWERSOURCE_PCIE_ID1 0x00 ++#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01 ++#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02 ++#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04 ++#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08 ++ ++//define ucPwrSensorId ++#define POWER_SENSOR_ALWAYS 0x00 ++#define POWER_SENSOR_GPIO 0x01 ++#define POWER_SENSOR_I2C 0x02 ++ ++/**************************************************************************/ ++// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design ++//Memory SS Info Table ++//Define Memory Clock SS chip ID ++#define ICS91719 1 ++#define ICS91720 2 ++ ++//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol ++typedef struct _ATOM_I2C_DATA_RECORD ++{ ++ UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" ++ UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually ++}ATOM_I2C_DATA_RECORD; ++ ++ ++//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information ++typedef struct _ATOM_I2C_DEVICE_SETUP_INFO ++{ ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap. 
++ UCHAR ucSSChipID; //SS chip being used ++ UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip ++ UCHAR ucNumOfI2CDataRecords; //number of data block ++ ATOM_I2C_DATA_RECORD asI2CData[1]; ++}ATOM_I2C_DEVICE_SETUP_INFO; ++ ++//========================================================================================== ++typedef struct _ATOM_ASIC_MVDD_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1]; ++}ATOM_ASIC_MVDD_INFO; ++ ++//========================================================================================== ++#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO ++ ++//========================================================================================== ++/**************************************************************************/ ++ ++typedef struct _ATOM_ASIC_SS_ASSIGNMENT ++{ ++ ULONG ulTargetClockRange; //Clock Out frequence (VCO ), in unit of 10Khz ++ USHORT usSpreadSpectrumPercentage; //in unit of 0.01% ++ USHORT usSpreadRateInKhz; //in unit of kHz, modulation freq ++ UCHAR ucClockIndication; //Indicate which clock source needs SS ++ UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread,=1 Center Spread. 
++ UCHAR ucReserved[2]; ++}ATOM_ASIC_SS_ASSIGNMENT; ++ ++//Define ucSpreadSpectrumType ++#define ASIC_INTERNAL_MEMORY_SS 1 ++#define ASIC_INTERNAL_ENGINE_SS 2 ++#define ASIC_INTERNAL_UVD_SS 3 ++ ++typedef struct _ATOM_ASIC_INTERNAL_SS_INFO{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4]; ++}ATOM_ASIC_INTERNAL_SS_INFO; ++ ++//==============================Scratch Pad Definition Portion=============================== ++#define ATOM_DEVICE_CONNECT_INFO_DEF 0 ++#define ATOM_ROM_LOCATION_DEF 1 ++#define ATOM_TV_STANDARD_DEF 2 ++#define ATOM_ACTIVE_INFO_DEF 3 ++#define ATOM_LCD_INFO_DEF 4 ++#define ATOM_DOS_REQ_INFO_DEF 5 ++#define ATOM_ACC_CHANGE_INFO_DEF 6 ++#define ATOM_DOS_MODE_INFO_DEF 7 ++#define ATOM_I2C_CHANNEL_STATUS_DEF 8 ++#define ATOM_I2C_CHANNEL_STATUS1_DEF 9 ++ ++ ++// BIOS_0_SCRATCH Definition ++#define ATOM_S0_CRT1_MONO 0x00000001L ++#define ATOM_S0_CRT1_COLOR 0x00000002L ++#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR) ++ ++#define ATOM_S0_TV1_COMPOSITE_A 0x00000004L ++#define ATOM_S0_TV1_SVIDEO_A 0x00000008L ++#define ATOM_S0_TV1_MASK_A (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A) ++ ++#define ATOM_S0_CV_A 0x00000010L ++#define ATOM_S0_CV_DIN_A 0x00000020L ++#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A) ++ ++ ++#define ATOM_S0_CRT2_MONO 0x00000100L ++#define ATOM_S0_CRT2_COLOR 0x00000200L ++#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR) ++ ++#define ATOM_S0_TV1_COMPOSITE 0x00000400L ++#define ATOM_S0_TV1_SVIDEO 0x00000800L ++#define ATOM_S0_TV1_SCART 0x00004000L ++#define ATOM_S0_TV1_MASK (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART) ++ ++#define ATOM_S0_CV 0x00001000L ++#define ATOM_S0_CV_DIN 0x00002000L ++#define ATOM_S0_CV_MASK (ATOM_S0_CV+ATOM_S0_CV_DIN) ++ ++#define ATOM_S0_DFP1 0x00010000L ++#define ATOM_S0_DFP2 0x00020000L ++#define ATOM_S0_LCD1 0x00040000L ++#define ATOM_S0_LCD2 0x00080000L ++#define ATOM_S0_TV2 0x00100000L ++#define 
ATOM_S0_DFP3 0x00200000L ++#define ATOM_S0_DFP4 0x00400000L ++#define ATOM_S0_DFP5 0x00800000L ++ ++#define ATOM_S0_DFP_MASK ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 ++ ++#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with ++ // the FAD/HDP reg access bug. Bit is read by DAL ++ ++#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L ++#define ATOM_S0_THERMAL_STATE_SHIFT 26 ++ ++#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L ++#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 ++ ++#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 ++#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 ++#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 ++ ++//Byte aligned defintion for BIOS usage ++#define ATOM_S0_CRT1_MONOb0 0x01 ++#define ATOM_S0_CRT1_COLORb0 0x02 ++#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0) ++ ++#define ATOM_S0_TV1_COMPOSITEb0 0x04 ++#define ATOM_S0_TV1_SVIDEOb0 0x08 ++#define ATOM_S0_TV1_MASKb0 (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0) ++ ++#define ATOM_S0_CVb0 0x10 ++#define ATOM_S0_CV_DINb0 0x20 ++#define ATOM_S0_CV_MASKb0 (ATOM_S0_CVb0+ATOM_S0_CV_DINb0) ++ ++#define ATOM_S0_CRT2_MONOb1 0x01 ++#define ATOM_S0_CRT2_COLORb1 0x02 ++#define ATOM_S0_CRT2_MASKb1 (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1) ++ ++#define ATOM_S0_TV1_COMPOSITEb1 0x04 ++#define ATOM_S0_TV1_SVIDEOb1 0x08 ++#define ATOM_S0_TV1_SCARTb1 0x40 ++#define ATOM_S0_TV1_MASKb1 (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1) ++ ++#define ATOM_S0_CVb1 0x10 ++#define ATOM_S0_CV_DINb1 0x20 ++#define ATOM_S0_CV_MASKb1 (ATOM_S0_CVb1+ATOM_S0_CV_DINb1) ++ ++#define ATOM_S0_DFP1b2 0x01 ++#define ATOM_S0_DFP2b2 0x02 ++#define ATOM_S0_LCD1b2 0x04 ++#define ATOM_S0_LCD2b2 0x08 ++#define ATOM_S0_TV2b2 0x10 ++#define ATOM_S0_DFP3b2 0x20 ++ ++#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C ++#define ATOM_S0_THERMAL_STATE_SHIFTb3 2 ++ ++#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0 ++#define ATOM_S0_LCD1_SHIFT 
18 ++ ++// BIOS_1_SCRATCH Definition ++#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL ++#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L ++ ++// BIOS_2_SCRATCH Definition ++#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL ++#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L ++#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8 ++ ++#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L ++#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L ++#define ATOM_S2_TV1_DPMS_STATE 0x00040000L ++#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L ++#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L ++#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L ++#define ATOM_S2_TV2_DPMS_STATE 0x00400000L ++#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L ++#define ATOM_S2_CV_DPMS_STATE 0x01000000L ++#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L ++#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L ++#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L ++ ++#define ATOM_S2_DFP_DPM_STATE ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | ATOM_S2_DFP5_DPMS_STATE ++ ++#define ATOM_S2_DEVICE_DPMS_STATE (ATOM_S2_CRT1_DPMS_STATE+ATOM_S2_LCD1_DPMS_STATE+ATOM_S2_TV1_DPMS_STATE+\ ++ ATOM_S2_DFP_DPMS_STATE+ATOM_S2_CRT2_DPMS_STATE+ATOM_S2_LCD2_DPMS_STATE+\ ++ ATOM_S2_TV2_DPMS_STATE+ATOM_S2_CV_DPMS_STATE ++ ++#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L ++#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26 ++#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L ++ ++#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L ++ ++#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0 ++#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE 0x1 ++#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE 0x2 ++#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE 0x3 ++#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30 ++#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L ++ ++ ++//Byte aligned defintion for BIOS usage ++#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F ++#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF ++#define ATOM_S2_CRT1_DPMS_STATEb2 0x01 ++#define 
ATOM_S2_LCD1_DPMS_STATEb2 0x02 ++#define ATOM_S2_TV1_DPMS_STATEb2 0x04 ++#define ATOM_S2_DFP1_DPMS_STATEb2 0x08 ++#define ATOM_S2_CRT2_DPMS_STATEb2 0x10 ++#define ATOM_S2_LCD2_DPMS_STATEb2 0x20 ++#define ATOM_S2_TV2_DPMS_STATEb2 0x40 ++#define ATOM_S2_DFP2_DPMS_STATEb2 0x80 ++#define ATOM_S2_CV_DPMS_STATEb3 0x01 ++#define ATOM_S2_DFP3_DPMS_STATEb3 0x02 ++#define ATOM_S2_DFP4_DPMS_STATEb3 0x04 ++#define ATOM_S2_DFP5_DPMS_STATEb3 0x08 ++ ++#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF ++#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C ++#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10 ++#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20 ++#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0 ++ ++ ++// BIOS_3_SCRATCH Definition ++#define ATOM_S3_CRT1_ACTIVE 0x00000001L ++#define ATOM_S3_LCD1_ACTIVE 0x00000002L ++#define ATOM_S3_TV1_ACTIVE 0x00000004L ++#define ATOM_S3_DFP1_ACTIVE 0x00000008L ++#define ATOM_S3_CRT2_ACTIVE 0x00000010L ++#define ATOM_S3_LCD2_ACTIVE 0x00000020L ++#define ATOM_S3_TV2_ACTIVE 0x00000040L ++#define ATOM_S3_DFP2_ACTIVE 0x00000080L ++#define ATOM_S3_CV_ACTIVE 0x00000100L ++#define ATOM_S3_DFP3_ACTIVE 0x00000200L ++#define ATOM_S3_DFP4_ACTIVE 0x00000400L ++#define ATOM_S3_DFP5_ACTIVE 0x00000800L ++ ++#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL ++ ++#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L ++#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L ++ ++#define ATOM_S3_CRT1_CRTC_ACTIVE 0x00010000L ++#define ATOM_S3_LCD1_CRTC_ACTIVE 0x00020000L ++#define ATOM_S3_TV1_CRTC_ACTIVE 0x00040000L ++#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L ++#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L ++#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L ++#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L ++#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L ++#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L ++#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L ++#define ATOM_S3_DFP4_CRTC_ACTIVE 0x04000000L ++#define ATOM_S3_DFP5_CRTC_ACTIVE 0x08000000L ++ ++#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 
0x0FFF0000L ++#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L ++#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L ++#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L ++ ++//Byte aligned defintion for BIOS usage ++#define ATOM_S3_CRT1_ACTIVEb0 0x01 ++#define ATOM_S3_LCD1_ACTIVEb0 0x02 ++#define ATOM_S3_TV1_ACTIVEb0 0x04 ++#define ATOM_S3_DFP1_ACTIVEb0 0x08 ++#define ATOM_S3_CRT2_ACTIVEb0 0x10 ++#define ATOM_S3_LCD2_ACTIVEb0 0x20 ++#define ATOM_S3_TV2_ACTIVEb0 0x40 ++#define ATOM_S3_DFP2_ACTIVEb0 0x80 ++#define ATOM_S3_CV_ACTIVEb1 0x01 ++#define ATOM_S3_DFP3_ACTIVEb1 0x02 ++#define ATOM_S3_DFP4_ACTIVEb1 0x04 ++#define ATOM_S3_DFP5_ACTIVEb1 0x08 ++ ++#define ATOM_S3_ACTIVE_CRTC1w0 0xFFF ++ ++#define ATOM_S3_CRT1_CRTC_ACTIVEb2 0x01 ++#define ATOM_S3_LCD1_CRTC_ACTIVEb2 0x02 ++#define ATOM_S3_TV1_CRTC_ACTIVEb2 0x04 ++#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08 ++#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10 ++#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20 ++#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40 ++#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80 ++#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01 ++#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02 ++#define ATOM_S3_DFP4_CRTC_ACTIVEb3 0x04 ++#define ATOM_S3_DFP5_CRTC_ACTIVEb3 0x08 ++ ++#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF ++ ++#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20 ++#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40 ++#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80 ++ ++// BIOS_4_SCRATCH Definition ++#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL ++#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L ++#define ATOM_S4_LCD1_REFRESH_SHIFT 8 ++ ++//Byte aligned defintion for BIOS usage ++#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF ++#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0 ++#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0 ++ ++// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! 
++#define ATOM_S5_DOS_REQ_CRT1b0 0x01 ++#define ATOM_S5_DOS_REQ_LCD1b0 0x02 ++#define ATOM_S5_DOS_REQ_TV1b0 0x04 ++#define ATOM_S5_DOS_REQ_DFP1b0 0x08 ++#define ATOM_S5_DOS_REQ_CRT2b0 0x10 ++#define ATOM_S5_DOS_REQ_LCD2b0 0x20 ++#define ATOM_S5_DOS_REQ_TV2b0 0x40 ++#define ATOM_S5_DOS_REQ_DFP2b0 0x80 ++#define ATOM_S5_DOS_REQ_CVb1 0x01 ++#define ATOM_S5_DOS_REQ_DFP3b1 0x02 ++#define ATOM_S5_DOS_REQ_DFP4b1 0x04 ++#define ATOM_S5_DOS_REQ_DFP5b1 0x08 ++ ++#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF ++ ++#define ATOM_S5_DOS_REQ_CRT1 0x0001 ++#define ATOM_S5_DOS_REQ_LCD1 0x0002 ++#define ATOM_S5_DOS_REQ_TV1 0x0004 ++#define ATOM_S5_DOS_REQ_DFP1 0x0008 ++#define ATOM_S5_DOS_REQ_CRT2 0x0010 ++#define ATOM_S5_DOS_REQ_LCD2 0x0020 ++#define ATOM_S5_DOS_REQ_TV2 0x0040 ++#define ATOM_S5_DOS_REQ_DFP2 0x0080 ++#define ATOM_S5_DOS_REQ_CV 0x0100 ++#define ATOM_S5_DOS_REQ_DFP3 0x0200 ++#define ATOM_S5_DOS_REQ_DFP4 0x0400 ++#define ATOM_S5_DOS_REQ_DFP5 0x0800 ++ ++#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0 ++#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0 ++#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0 ++#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1 ++#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\ ++ (ATOM_S5_DOS_FORCE_CVb3<<8)) ++ ++// BIOS_6_SCRATCH Definition ++#define ATOM_S6_DEVICE_CHANGE 0x00000001L ++#define ATOM_S6_SCALER_CHANGE 0x00000002L ++#define ATOM_S6_LID_CHANGE 0x00000004L ++#define ATOM_S6_DOCKING_CHANGE 0x00000008L ++#define ATOM_S6_ACC_MODE 0x00000010L ++#define ATOM_S6_EXT_DESKTOP_MODE 0x00000020L ++#define ATOM_S6_LID_STATE 0x00000040L ++#define ATOM_S6_DOCK_STATE 0x00000080L ++#define ATOM_S6_CRITICAL_STATE 0x00000100L ++#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L ++#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L ++#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L ++#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD ++#define 
ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD ++ ++#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion ++#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion ++ ++#define ATOM_S6_ACC_REQ_CRT1 0x00010000L ++#define ATOM_S6_ACC_REQ_LCD1 0x00020000L ++#define ATOM_S6_ACC_REQ_TV1 0x00040000L ++#define ATOM_S6_ACC_REQ_DFP1 0x00080000L ++#define ATOM_S6_ACC_REQ_CRT2 0x00100000L ++#define ATOM_S6_ACC_REQ_LCD2 0x00200000L ++#define ATOM_S6_ACC_REQ_TV2 0x00400000L ++#define ATOM_S6_ACC_REQ_DFP2 0x00800000L ++#define ATOM_S6_ACC_REQ_CV 0x01000000L ++#define ATOM_S6_ACC_REQ_DFP3 0x02000000L ++#define ATOM_S6_ACC_REQ_DFP4 0x04000000L ++#define ATOM_S6_ACC_REQ_DFP5 0x08000000L ++ ++#define ATOM_S6_ACC_REQ_MASK 0x0FFF0000L ++#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE 0x10000000L ++#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH 0x20000000L ++#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L ++#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L ++ ++//Byte aligned defintion for BIOS usage ++#define ATOM_S6_DEVICE_CHANGEb0 0x01 ++#define ATOM_S6_SCALER_CHANGEb0 0x02 ++#define ATOM_S6_LID_CHANGEb0 0x04 ++#define ATOM_S6_DOCKING_CHANGEb0 0x08 ++#define ATOM_S6_ACC_MODEb0 0x10 ++#define ATOM_S6_EXT_DESKTOP_MODEb0 0x20 ++#define ATOM_S6_LID_STATEb0 0x40 ++#define ATOM_S6_DOCK_STATEb0 0x80 ++#define ATOM_S6_CRITICAL_STATEb1 0x01 ++#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02 ++#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04 ++#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08 ++#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10 ++#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 ++ ++#define ATOM_S6_ACC_REQ_CRT1b2 0x01 ++#define ATOM_S6_ACC_REQ_LCD1b2 0x02 ++#define ATOM_S6_ACC_REQ_TV1b2 0x04 ++#define ATOM_S6_ACC_REQ_DFP1b2 0x08 ++#define 
ATOM_S6_ACC_REQ_CRT2b2 0x10 ++#define ATOM_S6_ACC_REQ_LCD2b2 0x20 ++#define ATOM_S6_ACC_REQ_TV2b2 0x40 ++#define ATOM_S6_ACC_REQ_DFP2b2 0x80 ++#define ATOM_S6_ACC_REQ_CVb3 0x01 ++#define ATOM_S6_ACC_REQ_DFP3b3 0x02 ++#define ATOM_S6_ACC_REQ_DFP4b3 0x04 ++#define ATOM_S6_ACC_REQ_DFP5b3 0x08 ++ ++#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0 ++#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10 ++#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20 ++#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3 0x40 ++#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3 0x80 ++ ++#define ATOM_S6_DEVICE_CHANGE_SHIFT 0 ++#define ATOM_S6_SCALER_CHANGE_SHIFT 1 ++#define ATOM_S6_LID_CHANGE_SHIFT 2 ++#define ATOM_S6_DOCKING_CHANGE_SHIFT 3 ++#define ATOM_S6_ACC_MODE_SHIFT 4 ++#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT 5 ++#define ATOM_S6_LID_STATE_SHIFT 6 ++#define ATOM_S6_DOCK_STATE_SHIFT 7 ++#define ATOM_S6_CRITICAL_STATE_SHIFT 8 ++#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT 9 ++#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT 10 ++#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT 11 ++#define ATOM_S6_REQ_SCALER_SHIFT 12 ++#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT 13 ++#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT 14 ++#define ATOM_S6_I2C_STATE_CHANGE_SHIFT 15 ++#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT 28 ++#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT 29 ++#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30 ++#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31 ++ ++// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! 
++#define ATOM_S7_DOS_MODE_TYPEb0 0x03 ++#define ATOM_S7_DOS_MODE_VGAb0 0x00 ++#define ATOM_S7_DOS_MODE_VESAb0 0x01 ++#define ATOM_S7_DOS_MODE_EXTb0 0x02 ++#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C ++#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0 ++#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01 ++#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF ++ ++#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8 ++ ++// BIOS_8_SCRATCH Definition ++#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF ++#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000 ++ ++#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0 ++#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16 ++ ++// BIOS_9_SCRATCH Definition ++#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK ++#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF ++#endif ++#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK ++#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000 ++#endif ++#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT ++#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0 ++#endif ++#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT ++#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16 ++#endif ++ ++ ++#define ATOM_FLAG_SET 0x20 ++#define ATOM_FLAG_CLEAR 0 ++#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR) ++#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET) ++ ++#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR) ++ ++#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 
)|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR) ++ ++#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET) ++ ++#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR) ++ ++#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR ) ++ ++#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET ) ++#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR ) ++ ++#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) ++ ++#define SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) ++ ++#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR ) ++#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | 
ATOM_FLAG_SET ) ++#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR ) ++ ++/****************************************************************************/ ++//Portion II: Definitinos only used in Driver ++/****************************************************************************/ ++ ++// Macros used by driver ++ ++#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT)) ++ ++#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F) ++#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F) ++ ++#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION ++#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION ++ ++/****************************************************************************/ ++//Portion III: Definitinos only used in VBIOS ++/****************************************************************************/ ++#define ATOM_DAC_SRC 0x80 ++#define ATOM_SRC_DAC1 0 ++#define ATOM_SRC_DAC2 0x80 ++ ++ ++#ifdef UEFI_BUILD ++ #define USHORT UTEMP ++#endif ++ ++typedef struct _MEMORY_PLLINIT_PARAMETERS ++{ ++ ULONG ulTargetMemoryClock; //In 10Khz unit ++ UCHAR ucAction; //not define yet ++ UCHAR ucFbDiv_Hi; //Fbdiv Hi byte ++ UCHAR ucFbDiv; //FB value ++ UCHAR ucPostDiv; //Post div ++}MEMORY_PLLINIT_PARAMETERS; ++ ++#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS ++ ++ ++#define GPIO_PIN_WRITE 0x01 ++#define GPIO_PIN_READ 0x00 ++ ++typedef struct _GPIO_PIN_CONTROL_PARAMETERS ++{ ++ UCHAR ucGPIO_ID; //return value, read from GPIO pins ++ UCHAR ucGPIOBitShift; //define which bit in uGPIOBitVal need to be update ++ UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined 
in ucGPIOBitMask ++ UCHAR ucAction; //=GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write ++}GPIO_PIN_CONTROL_PARAMETERS; ++ ++typedef struct _ENABLE_SCALER_PARAMETERS ++{ ++ UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2 ++ UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION ++ UCHAR ucTVStandard; // ++ UCHAR ucPadding[1]; ++}ENABLE_SCALER_PARAMETERS; ++#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS ++ ++//ucEnable: ++#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0 ++#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1 ++#define SCALER_ENABLE_2TAP_ALPHA_MODE 2 ++#define SCALER_ENABLE_MULTITAP_MODE 3 ++ ++typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS ++{ ++ ULONG usHWIconHorzVertPosn; // Hardware Icon Vertical position ++ UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset ++ UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset ++ UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS; ++ ++typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION ++{ ++ ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon; ++ ENABLE_CRTC_PARAMETERS sReserved; ++}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION; ++ ++typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS ++{ ++ USHORT usHight; // Image Hight ++ USHORT usWidth; // Image Width ++ UCHAR ucSurface; // Surface 1 or 2 ++ UCHAR ucPadding[3]; ++}ENABLE_GRAPH_SURFACE_PARAMETERS; ++ ++typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 ++{ ++ USHORT usHight; // Image Hight ++ USHORT usWidth; // Image Width ++ UCHAR ucSurface; // Surface 1 or 2 ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucPadding[2]; ++}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2; ++ ++typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION ++{ ++ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface; ++ ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one ++}ENABLE_GRAPH_SURFACE_PS_ALLOCATION; 
++ ++typedef struct _MEMORY_CLEAN_UP_PARAMETERS ++{ ++ USHORT usMemoryStart; //in 8Kb boundry, offset from memory base address ++ USHORT usMemorySize; //8Kb blocks aligned ++}MEMORY_CLEAN_UP_PARAMETERS; ++#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS ++ ++typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS ++{ ++ USHORT usX_Size; //When use as input parameter, usX_Size indicates which CRTC ++ USHORT usY_Size; ++}GET_DISPLAY_SURFACE_SIZE_PARAMETERS; ++ ++typedef struct _INDIRECT_IO_ACCESS ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR IOAccessSequence[256]; ++} INDIRECT_IO_ACCESS; ++ ++#define INDIRECT_READ 0x00 ++#define INDIRECT_WRITE 0x80 ++ ++#define INDIRECT_IO_MM 0 ++#define INDIRECT_IO_PLL 1 ++#define INDIRECT_IO_MC 2 ++#define INDIRECT_IO_PCIE 3 ++#define INDIRECT_IO_PCIEP 4 ++#define INDIRECT_IO_NBMISC 5 ++ ++#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ ++#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE ++#define INDIRECT_IO_MC_READ INDIRECT_IO_MC | INDIRECT_READ ++#define INDIRECT_IO_MC_WRITE INDIRECT_IO_MC | INDIRECT_WRITE ++#define INDIRECT_IO_PCIE_READ INDIRECT_IO_PCIE | INDIRECT_READ ++#define INDIRECT_IO_PCIE_WRITE INDIRECT_IO_PCIE | INDIRECT_WRITE ++#define INDIRECT_IO_PCIEP_READ INDIRECT_IO_PCIEP | INDIRECT_READ ++#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE ++#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ ++#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE ++ ++typedef struct _ATOM_OEM_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; ++}ATOM_OEM_INFO; ++ ++typedef struct _ATOM_TV_MODE ++{ ++ UCHAR ucVMode_Num; //Video mode number ++ UCHAR ucTV_Mode_Num; //Internal TV mode number ++}ATOM_TV_MODE; ++ ++typedef struct _ATOM_BIOS_INT_TVSTD_MODE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table ++ USHORT usTV_FIFO_Offset; // 
Pointer to FIFO entry table ++ USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table ++ USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table ++ USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table ++}ATOM_BIOS_INT_TVSTD_MODE; ++ ++ ++typedef struct _ATOM_TV_MODE_SCALER_PTR ++{ ++ USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients ++ USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients ++ UCHAR ucTV_Mode_Num; ++}ATOM_TV_MODE_SCALER_PTR; ++ ++typedef struct _ATOM_STANDARD_VESA_TIMING ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation ++}ATOM_STANDARD_VESA_TIMING; ++ ++ ++typedef struct _ATOM_STD_FORMAT ++{ ++ USHORT usSTD_HDisp; ++ USHORT usSTD_VDisp; ++ USHORT usSTD_RefreshRate; ++ USHORT usReserved; ++}ATOM_STD_FORMAT; ++ ++typedef struct _ATOM_VESA_TO_EXTENDED_MODE ++{ ++ USHORT usVESA_ModeNumber; ++ USHORT usExtendedModeNumber; ++}ATOM_VESA_TO_EXTENDED_MODE; ++ ++typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76]; ++}ATOM_VESA_TO_INTENAL_MODE_LUT; ++ ++/*************** ATOM Memory Related Data Structure ***********************/ ++typedef struct _ATOM_MEMORY_VENDOR_BLOCK{ ++ UCHAR ucMemoryType; ++ UCHAR ucMemoryVendor; ++ UCHAR ucAdjMCId; ++ UCHAR ucDynClkId; ++ ULONG ulDllResetClkRange; ++}ATOM_MEMORY_VENDOR_BLOCK; ++ ++ ++typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{ ++#if ATOM_BIG_ENDIAN ++ ULONG ucMemBlkId:8; ++ ULONG ulMemClockRange:24; ++#else ++ ULONG ulMemClockRange:24; ++ ULONG ucMemBlkId:8; ++#endif ++}ATOM_MEMORY_SETTING_ID_CONFIG; ++ ++typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ++{ ++ ATOM_MEMORY_SETTING_ID_CONFIG slAccess; ++ ULONG ulAccess; ++}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS; ++ ++ ++typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{ ++ ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; ++ ULONG aulMemData[1]; 
++}ATOM_MEMORY_SETTING_DATA_BLOCK; ++ ++ ++typedef struct _ATOM_INIT_REG_INDEX_FORMAT{ ++ USHORT usRegIndex; // MC register index ++ UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf ++}ATOM_INIT_REG_INDEX_FORMAT; ++ ++ ++typedef struct _ATOM_INIT_REG_BLOCK{ ++ USHORT usRegIndexTblSize; //size of asRegIndexBuf ++ USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK ++ ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1]; ++ ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1]; ++}ATOM_INIT_REG_BLOCK; ++ ++#define END_OF_REG_INDEX_BLOCK 0x0ffff ++#define END_OF_REG_DATA_BLOCK 0x00000000 ++#define ATOM_INIT_REG_MASK_FLAG 0x80 ++#define CLOCK_RANGE_HIGHEST 0x00ffffff ++ ++#define VALUE_DWORD SIZEOF ULONG ++#define VALUE_SAME_AS_ABOVE 0 ++#define VALUE_MASK_DWORD 0x84 ++ ++#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1) ++#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) ++#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) ++ ++ ++typedef struct _ATOM_MC_INIT_PARAM_TABLE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usAdjustARB_SEQDataOffset; ++ USHORT usMCInitMemTypeTblOffset; ++ USHORT usMCInitCommonTblOffset; ++ USHORT usMCInitPowerDownTblOffset; ++ ULONG ulARB_SEQDataBuf[32]; ++ ATOM_INIT_REG_BLOCK asMCInitMemType; ++ ATOM_INIT_REG_BLOCK asMCInitCommon; ++}ATOM_MC_INIT_PARAM_TABLE; ++ ++ ++#define _4Mx16 0x2 ++#define _4Mx32 0x3 ++#define _8Mx16 0x12 ++#define _8Mx32 0x13 ++#define _16Mx16 0x22 ++#define _16Mx32 0x23 ++#define _32Mx16 0x32 ++#define _32Mx32 0x33 ++#define _64Mx8 0x41 ++#define _64Mx16 0x42 ++ ++#define SAMSUNG 0x1 ++#define INFINEON 0x2 ++#define ELPIDA 0x3 ++#define ETRON 0x4 ++#define NANYA 0x5 ++#define HYNIX 0x6 ++#define MOSEL 0x7 ++#define WINBOND 0x8 ++#define ESMT 0x9 ++#define MICRON 0xF ++ ++#define QIMONDA INFINEON ++#define PROMOS MOSEL ++ ++/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// ++ ++#define UCODE_ROM_START_ADDRESS 0x1c000 ++#define 
UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode ++ ++//uCode block header for reference ++ ++typedef struct _MCuCodeHeader ++{ ++ ULONG ulSignature; ++ UCHAR ucRevision; ++ UCHAR ucChecksum; ++ UCHAR ucReserved1; ++ UCHAR ucReserved2; ++ USHORT usParametersLength; ++ USHORT usUCodeLength; ++ USHORT usReserved1; ++ USHORT usReserved2; ++} MCuCodeHeader; ++ ++////////////////////////////////////////////////////////////////////////////////// ++ ++#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16 ++ ++#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF ++typedef struct _ATOM_VRAM_MODULE_V1 ++{ ++ ULONG ulReserved; ++ USHORT usEMRSValue; ++ USHORT usMRSValue; ++ USHORT usReserved; ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; ++ UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender ++ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... 
++ UCHAR ucRow; // Number of Row,in power of 2; ++ UCHAR ucColumn; // Number of Column,in power of 2; ++ UCHAR ucBank; // Nunber of Bank; ++ UCHAR ucRank; // Number of Rank, in power of 2 ++ UCHAR ucChannelNum; // Number of channel; ++ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 ++ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; ++ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; ++ UCHAR ucReserved[2]; ++}ATOM_VRAM_MODULE_V1; ++ ++ ++typedef struct _ATOM_VRAM_MODULE_V2 ++{ ++ ULONG ulReserved; ++ ULONG ulFlags; // To enable/disable functionalities based on memory type ++ ULONG ulEngineClock; // Override of default engine clock for particular memory type ++ ULONG ulMemoryClock; // Override of default memory clock for particular memory type ++ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usEMRSValue; ++ USHORT usMRSValue; ++ USHORT usReserved; ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; ++ UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed ++ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... 
++ UCHAR ucRow; // Number of Row,in power of 2; ++ UCHAR ucColumn; // Number of Column,in power of 2; ++ UCHAR ucBank; // Nunber of Bank; ++ UCHAR ucRank; // Number of Rank, in power of 2 ++ UCHAR ucChannelNum; // Number of channel; ++ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 ++ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; ++ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; ++ UCHAR ucRefreshRateFactor; ++ UCHAR ucReserved[3]; ++}ATOM_VRAM_MODULE_V2; ++ ++ ++typedef struct _ATOM_MEMORY_TIMING_FORMAT ++{ ++ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing ++ union{ ++ USHORT usMRS; // mode register ++ USHORT usDDR3_MR0; ++ }; ++ union{ ++ USHORT usEMRS; // extended mode register ++ USHORT usDDR3_MR1; ++ }; ++ UCHAR ucCL; // CAS latency ++ UCHAR ucWL; // WRITE Latency ++ UCHAR uctRAS; // tRAS ++ UCHAR uctRC; // tRC ++ UCHAR uctRFC; // tRFC ++ UCHAR uctRCDR; // tRCDR ++ UCHAR uctRCDW; // tRCDW ++ UCHAR uctRP; // tRP ++ UCHAR uctRRD; // tRRD ++ UCHAR uctWR; // tWR ++ UCHAR uctWTR; // tWTR ++ UCHAR uctPDIX; // tPDIX ++ UCHAR uctFAW; // tFAW ++ UCHAR uctAOND; // tAOND ++ union ++ { ++ struct { ++ UCHAR ucflag; // flag to control memory timing calculation. 
bit0= control EMRS2 Infineon ++ UCHAR ucReserved; ++ }; ++ USHORT usDDR3_MR2; ++ }; ++}ATOM_MEMORY_TIMING_FORMAT; ++ ++ ++typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 ++{ ++ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing ++ USHORT usMRS; // mode register ++ USHORT usEMRS; // extended mode register ++ UCHAR ucCL; // CAS latency ++ UCHAR ucWL; // WRITE Latency ++ UCHAR uctRAS; // tRAS ++ UCHAR uctRC; // tRC ++ UCHAR uctRFC; // tRFC ++ UCHAR uctRCDR; // tRCDR ++ UCHAR uctRCDW; // tRCDW ++ UCHAR uctRP; // tRP ++ UCHAR uctRRD; // tRRD ++ UCHAR uctWR; // tWR ++ UCHAR uctWTR; // tWTR ++ UCHAR uctPDIX; // tPDIX ++ UCHAR uctFAW; // tFAW ++ UCHAR uctAOND; // tAOND ++ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon ++////////////////////////////////////GDDR parameters/////////////////////////////////// ++ UCHAR uctCCDL; // ++ UCHAR uctCRCRL; // ++ UCHAR uctCRCWL; // ++ UCHAR uctCKE; // ++ UCHAR uctCKRSE; // ++ UCHAR uctCKRSX; // ++ UCHAR uctFAW32; // ++ UCHAR ucReserved1; // ++ UCHAR ucReserved2; // ++ UCHAR ucTerminator; ++}ATOM_MEMORY_TIMING_FORMAT_V1; ++ ++ ++typedef struct _ATOM_MEMORY_FORMAT ++{ ++ ULONG ulDllDisClock; // memory DLL will be disable when target memory clock is below this clock ++ union{ ++ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usDDR3_Reserved; // Not used for DDR3 memory ++ }; ++ union{ ++ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usDDR3_MR3; // Used for DDR3 memory ++ }; ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; ++ UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. 
If not predefined, vendor detection table gets executed ++ UCHAR ucRow; // Number of Row,in power of 2; ++ UCHAR ucColumn; // Number of Column,in power of 2; ++ UCHAR ucBank; // Nunber of Bank; ++ UCHAR ucRank; // Number of Rank, in power of 2 ++ UCHAR ucBurstSize; // burst size, 0= burst size=4 1= burst size=8 ++ UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) ++ UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms ++ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 ++ UCHAR ucPreamble; //[7:4] Write Preamble, [3:0] Read Preamble ++ UCHAR ucMemAttrib; // Memory Device Addribute, like RDBI/WDBI etc ++ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock ++}ATOM_MEMORY_FORMAT; ++ ++ ++typedef struct _ATOM_VRAM_MODULE_V3 ++{ ++ ULONG ulChannelMapCfg; // board dependent paramenter:Channel combination ++ USHORT usSize; // size of ATOM_VRAM_MODULE_V3 ++ USHORT usDefaultMVDDQ; // board dependent parameter:Default Memory Core Voltage ++ USHORT usDefaultMVDDC; // board dependent parameter:Default Memory IO Voltage ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucChannelNum; // board dependent parameter:Number of channel; ++ UCHAR ucChannelSize; // board dependent parameter:32bit or 64bit ++ UCHAR ucVREFI; // board dependnt parameter: EXT or INT +160mv to -140mv ++ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters ++ UCHAR ucFlag; // To enable/disable functionalities based on memory type ++ ATOM_MEMORY_FORMAT asMemory; // describ all of video memory parameters from memory spec ++}ATOM_VRAM_MODULE_V3; ++ ++ ++//ATOM_VRAM_MODULE_V3.ucNPL_RT ++#define NPL_RT_MASK 0x0f ++#define BATTERY_ODT_MASK 0xc0 ++ ++#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3 ++ ++typedef struct _ATOM_VRAM_MODULE_V4 ++{ ++ ULONG ulChannelMapCfg; // board 
dependent parameter: Channel combination ++ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE ++ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! ++ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) ++ USHORT usReserved; ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; ++ UCHAR ucChannelNum; // Number of channels present in this module config ++ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits ++ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 ++ UCHAR ucFlag; // To enable/disable functionalities based on memory type ++ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 ++ UCHAR ucVREFI; // board dependent parameter ++ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters ++ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble ++ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
++ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros ++ UCHAR ucReserved[3]; ++ ++//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level ++ union{ ++ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usDDR3_Reserved; ++ }; ++ union{ ++ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usDDR3_MR3; // Used for DDR3 memory ++ }; ++ UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed ++ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) ++ UCHAR ucReserved2[2]; ++ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];//Memory Timing block sort from lower clock to higher clock ++}ATOM_VRAM_MODULE_V4; ++ ++#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3 ++#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1 ++#define VRAM_MODULE_V4_MISC_BL_MASK 0x4 ++#define VRAM_MODULE_V4_MISC_BL8 0x4 ++#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10 ++ ++typedef struct _ATOM_VRAM_MODULE_V5 ++{ ++ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination ++ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE ++ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
++ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) ++ USHORT usReserved; ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; ++ UCHAR ucChannelNum; // Number of channels present in this module config ++ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits ++ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 ++ UCHAR ucFlag; // To enable/disable functionalities based on memory type ++ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 ++ UCHAR ucVREFI; // board dependent parameter ++ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters ++ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble ++ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
++ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros ++ UCHAR ucReserved[3]; ++ ++//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level ++ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type ++ UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed ++ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) ++ UCHAR ucFIFODepth; // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth ++ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth ++ ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock ++}ATOM_VRAM_MODULE_V5; ++ ++typedef struct _ATOM_VRAM_INFO_V2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucNumOfVRAMModule; ++ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; ++}ATOM_VRAM_INFO_V2; ++ ++typedef struct _ATOM_VRAM_INFO_V3 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting ++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting ++ USHORT usRerseved; ++ UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator ++ UCHAR ucNumOfVRAMModule; ++ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; ++ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation ++ // ATOM_INIT_REG_BLOCK aMemAdjust; ++}ATOM_VRAM_INFO_V3; ++ ++#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3 ++ ++typedef struct _ATOM_VRAM_INFO_V4 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT 
usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting ++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting ++ USHORT usRerseved; ++ UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 ++ ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] ++ UCHAR ucReservde[4]; ++ UCHAR ucNumOfVRAMModule; ++ ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; ++ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation ++ // ATOM_INIT_REG_BLOCK aMemAdjust; ++}ATOM_VRAM_INFO_V4; ++ ++typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR aVID_PinsShift[9]; //8 bit strap maximum+terminator ++}ATOM_VRAM_GPIO_DETECTION_INFO; ++ ++ ++typedef struct _ATOM_MEMORY_TRAINING_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucTrainingLoop; ++ UCHAR ucReserved[3]; ++ ATOM_INIT_REG_BLOCK asMemTrainingSetting; ++}ATOM_MEMORY_TRAINING_INFO; ++ ++ ++typedef struct SW_I2C_CNTL_DATA_PARAMETERS ++{ ++ UCHAR ucControl; ++ UCHAR ucData; ++ UCHAR ucSatus; ++ UCHAR ucTemp; ++} SW_I2C_CNTL_DATA_PARAMETERS; ++ ++#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS ++ ++typedef struct _SW_I2C_IO_DATA_PARAMETERS ++{ ++ USHORT GPIO_Info; ++ UCHAR ucAct; ++ UCHAR ucData; ++ } SW_I2C_IO_DATA_PARAMETERS; ++ ++#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS ++ ++/****************************SW I2C CNTL DEFINITIONS**********************/ ++#define SW_I2C_IO_RESET 0 ++#define SW_I2C_IO_GET 1 ++#define SW_I2C_IO_DRIVE 2 ++#define SW_I2C_IO_SET 3 ++#define SW_I2C_IO_START 4 ++ ++#define SW_I2C_IO_CLOCK 0 ++#define SW_I2C_IO_DATA 0x80 ++ ++#define SW_I2C_IO_ZERO 0 ++#define SW_I2C_IO_ONE 0x100 ++ ++#define SW_I2C_CNTL_READ 0 ++#define 
SW_I2C_CNTL_WRITE 1 ++#define SW_I2C_CNTL_START 2 ++#define SW_I2C_CNTL_STOP 3 ++#define SW_I2C_CNTL_OPEN 4 ++#define SW_I2C_CNTL_CLOSE 5 ++#define SW_I2C_CNTL_WRITE1BIT 6 ++ ++//==============================VESA definition Portion=============================== ++#define VESA_OEM_PRODUCT_REV '01.00' ++#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support ++#define VESA_MODE_WIN_ATTRIBUTE 7 ++#define VESA_WIN_SIZE 64 ++ ++typedef struct _PTR_32_BIT_STRUCTURE ++{ ++ USHORT Offset16; ++ USHORT Segment16; ++} PTR_32_BIT_STRUCTURE; ++ ++typedef union _PTR_32_BIT_UNION ++{ ++ PTR_32_BIT_STRUCTURE SegmentOffset; ++ ULONG Ptr32_Bit; ++} PTR_32_BIT_UNION; ++ ++typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE ++{ ++ UCHAR VbeSignature[4]; ++ USHORT VbeVersion; ++ PTR_32_BIT_UNION OemStringPtr; ++ UCHAR Capabilities[4]; ++ PTR_32_BIT_UNION VideoModePtr; ++ USHORT TotalMemory; ++} VBE_1_2_INFO_BLOCK_UPDATABLE; ++ ++ ++typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE ++{ ++ VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock; ++ USHORT OemSoftRev; ++ PTR_32_BIT_UNION OemVendorNamePtr; ++ PTR_32_BIT_UNION OemProductNamePtr; ++ PTR_32_BIT_UNION OemProductRevPtr; ++} VBE_2_0_INFO_BLOCK_UPDATABLE; ++ ++typedef union _VBE_VERSION_UNION ++{ ++ VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock; ++ VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock; ++} VBE_VERSION_UNION; ++ ++typedef struct _VBE_INFO_BLOCK ++{ ++ VBE_VERSION_UNION UpdatableVBE_Info; ++ UCHAR Reserved[222]; ++ UCHAR OemData[256]; ++} VBE_INFO_BLOCK; ++ ++typedef struct _VBE_FP_INFO ++{ ++ USHORT HSize; ++ USHORT VSize; ++ USHORT FPType; ++ UCHAR RedBPP; ++ UCHAR GreenBPP; ++ UCHAR BlueBPP; ++ UCHAR ReservedBPP; ++ ULONG RsvdOffScrnMemSize; ++ ULONG RsvdOffScrnMEmPtr; ++ UCHAR Reserved[14]; ++} VBE_FP_INFO; ++ ++typedef struct _VESA_MODE_INFO_BLOCK ++{ ++// Mandatory information for all VBE revisions ++ USHORT ModeAttributes; // dw ? ; mode attributes ++ UCHAR WinAAttributes; // db ? 
; window A attributes ++ UCHAR WinBAttributes; // db ? ; window B attributes ++ USHORT WinGranularity; // dw ? ; window granularity ++ USHORT WinSize; // dw ? ; window size ++ USHORT WinASegment; // dw ? ; window A start segment ++ USHORT WinBSegment; // dw ? ; window B start segment ++ ULONG WinFuncPtr; // dd ? ; real mode pointer to window function ++ USHORT BytesPerScanLine;// dw ? ; bytes per scan line ++ ++//; Mandatory information for VBE 1.2 and above ++ USHORT XResolution; // dw ? ; horizontal resolution in pixels or characters ++ USHORT YResolution; // dw ? ; vertical resolution in pixels or characters ++ UCHAR XCharSize; // db ? ; character cell width in pixels ++ UCHAR YCharSize; // db ? ; character cell height in pixels ++ UCHAR NumberOfPlanes; // db ? ; number of memory planes ++ UCHAR BitsPerPixel; // db ? ; bits per pixel ++ UCHAR NumberOfBanks; // db ? ; number of banks ++ UCHAR MemoryModel; // db ? ; memory model type ++ UCHAR BankSize; // db ? ; bank size in KB ++ UCHAR NumberOfImagePages;// db ? ; number of images ++ UCHAR ReservedForPageFunction;//db 1 ; reserved for page function ++ ++//; Direct Color fields(required for direct/6 and YUV/7 memory models) ++ UCHAR RedMaskSize; // db ? ; size of direct color red mask in bits ++ UCHAR RedFieldPosition; // db ? ; bit position of lsb of red mask ++ UCHAR GreenMaskSize; // db ? ; size of direct color green mask in bits ++ UCHAR GreenFieldPosition; // db ? ; bit position of lsb of green mask ++ UCHAR BlueMaskSize; // db ? ; size of direct color blue mask in bits ++ UCHAR BlueFieldPosition; // db ? ; bit position of lsb of blue mask ++ UCHAR RsvdMaskSize; // db ? ; size of direct color reserved mask in bits ++ UCHAR RsvdFieldPosition; // db ? ; bit position of lsb of reserved mask ++ UCHAR DirectColorModeInfo;// db ? ; direct color mode attributes ++ ++//; Mandatory information for VBE 2.0 and above ++ ULONG PhysBasePtr; // dd ? 
; physical address for flat memory frame buffer ++ ULONG Reserved_1; // dd 0 ; reserved - always set to 0 ++ USHORT Reserved_2; // dw 0 ; reserved - always set to 0 ++ ++//; Mandatory information for VBE 3.0 and above ++ USHORT LinBytesPerScanLine; // dw ? ; bytes per scan line for linear modes ++ UCHAR BnkNumberOfImagePages;// db ? ; number of images for banked modes ++ UCHAR LinNumberOfImagPages; // db ? ; number of images for linear modes ++ UCHAR LinRedMaskSize; // db ? ; size of direct color red mask(linear modes) ++ UCHAR LinRedFieldPosition; // db ? ; bit position of lsb of red mask(linear modes) ++ UCHAR LinGreenMaskSize; // db ? ; size of direct color green mask(linear modes) ++ UCHAR LinGreenFieldPosition;// db ? ; bit position of lsb of green mask(linear modes) ++ UCHAR LinBlueMaskSize; // db ? ; size of direct color blue mask(linear modes) ++ UCHAR LinBlueFieldPosition; // db ? ; bit position of lsb of blue mask(linear modes) ++ UCHAR LinRsvdMaskSize; // db ? ; size of direct color reserved mask(linear modes) ++ UCHAR LinRsvdFieldPosition; // db ? ; bit position of lsb of reserved mask(linear modes) ++ ULONG MaxPixelClock; // dd ? 
; maximum pixel clock(in Hz) for graphics mode ++ UCHAR Reserved; // db 190 dup (0) ++} VESA_MODE_INFO_BLOCK; ++ ++// BIOS function CALLS ++#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 // ATI Extended Function code ++#define ATOM_BIOS_FUNCTION_COP_MODE 0x00 ++#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04 ++#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05 ++#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06 ++#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B ++#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E ++#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F ++#define ATOM_BIOS_FUNCTION_STV_STD 0x16 ++#define ATOM_BIOS_FUNCTION_DEVICE_DET 0x17 ++#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH 0x18 ++ ++#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82 ++#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83 ++#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84 ++#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A ++#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B ++#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80 ++#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 80 ++ ++#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D ++#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E ++#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F ++#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03 ++#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7 ++#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller the current thermal state ++#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller the current critical state ++#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85 ++#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89 ++#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported ++ ++ ++#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS ++#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01 ++#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02 ++#define 
ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON. ++#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY ++#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND ++#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF ++#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) ++ ++#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L ++#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L ++#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL ++ ++// structure used for VBIOS only ++ ++//DispOutInfoTable ++typedef struct _ASIC_TRANSMITTER_INFO ++{ ++ USHORT usTransmitterObjId; ++ USHORT usSupportDevice; ++ UCHAR ucTransmitterCmdTblId; ++ UCHAR ucConfig; ++ UCHAR ucEncoderID; //available 1st encoder ( default ) ++ UCHAR ucOptionEncoderID; //available 2nd encoder ( optional ) ++ UCHAR uc2ndEncoderID; ++ UCHAR ucReserved; ++}ASIC_TRANSMITTER_INFO; ++ ++typedef struct _ASIC_ENCODER_INFO ++{ ++ UCHAR ucEncoderID; ++ UCHAR ucEncoderConfig; ++ USHORT usEncoderCmdTblId; ++}ASIC_ENCODER_INFO; ++ ++typedef struct _ATOM_DISP_OUT_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT ptrTransmitterInfo; ++ USHORT ptrEncoderInfo; ++ ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; ++ ASIC_ENCODER_INFO asEncoderInfo[1]; ++}ATOM_DISP_OUT_INFO; ++ ++// DispDevicePriorityInfo ++typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT asDevicePriority[16]; ++}ATOM_DISPLAY_DEVICE_PRIORITY_INFO; ++ ++//ProcessAuxChannelTransactionTable ++typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS ++{ ++ USHORT lpAuxRequest; ++ USHORT lpDataOut; ++ UCHAR ucChannelID; ++ union ++ { ++ UCHAR ucReplyStatus; ++ UCHAR ucDelay; ++ }; ++ UCHAR ucDataOutLen; ++ UCHAR ucReserved; ++}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS; ++ ++#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS ++ ++//GetSinkType ++ 
++typedef struct _DP_ENCODER_SERVICE_PARAMETERS ++{ ++ USHORT ucLinkClock; ++ union ++ { ++ UCHAR ucConfig; // for DP training command ++ UCHAR ucI2cId; // use for GET_SINK_TYPE command ++ }; ++ UCHAR ucAction; ++ UCHAR ucStatus; ++ UCHAR ucLaneNum; ++ UCHAR ucReserved[2]; ++}DP_ENCODER_SERVICE_PARAMETERS; ++ ++// ucAction ++#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01 ++#define ATOM_DP_ACTION_TRAINING_START 0x02 ++#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03 ++#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04 ++#define ATOM_DP_ACTION_SET_VSWING_PREEMP 0x05 ++#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06 ++#define ATOM_DP_ACTION_BLANKING 0x07 ++ ++// ucConfig ++#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03 ++#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00 ++#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01 ++#define ATOM_DP_CONFIG_EXTERNAL_ENCODER 0x02 ++#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04 ++#define ATOM_DP_CONFIG_LINK_A 0x00 ++#define ATOM_DP_CONFIG_LINK_B 0x04 ++ ++#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS ++ ++// DP_TRAINING_TABLE ++#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR ++#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) ++#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16 ) ++#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24 ) ++#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32) ++#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40) ++#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48) ++#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 60) ++#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64) ++#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72) ++#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76) ++#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 
80) ++ ++ ++typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS ++{ ++ UCHAR ucI2CSpeed; ++ union ++ { ++ UCHAR ucRegIndex; ++ UCHAR ucStatus; ++ }; ++ USHORT lpI2CDataOut; ++ UCHAR ucFlag; ++ UCHAR ucTransBytes; ++ UCHAR ucSlaveAddr; ++ UCHAR ucLineNumber; ++}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS; ++ ++#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS ++ ++//ucFlag ++#define HW_I2C_WRITE 1 ++#define HW_I2C_READ 0 ++ ++ ++/****************************************************************************/ ++//Portion VI: Definitinos being oboselete ++/****************************************************************************/ ++ ++//========================================================================================== ++//Remove the definitions below when driver is ready! ++typedef struct _ATOM_DAC_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usMaxFrequency; // in 10kHz unit ++ USHORT usReserved; ++}ATOM_DAC_INFO; ++ ++ ++typedef struct _COMPASSIONATE_DATA ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ++ //============================== DAC1 portion ++ UCHAR ucDAC1_BG_Adjustment; ++ UCHAR ucDAC1_DAC_Adjustment; ++ USHORT usDAC1_FORCE_Data; ++ //============================== DAC2 portion ++ UCHAR ucDAC2_CRT2_BG_Adjustment; ++ UCHAR ucDAC2_CRT2_DAC_Adjustment; ++ USHORT usDAC2_CRT2_FORCE_Data; ++ USHORT usDAC2_CRT2_MUX_RegisterIndex; ++ UCHAR ucDAC2_CRT2_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low ++ UCHAR ucDAC2_NTSC_BG_Adjustment; ++ UCHAR ucDAC2_NTSC_DAC_Adjustment; ++ USHORT usDAC2_TV1_FORCE_Data; ++ USHORT usDAC2_TV1_MUX_RegisterIndex; ++ UCHAR ucDAC2_TV1_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low ++ UCHAR ucDAC2_CV_BG_Adjustment; ++ UCHAR ucDAC2_CV_DAC_Adjustment; ++ USHORT usDAC2_CV_FORCE_Data; ++ USHORT usDAC2_CV_MUX_RegisterIndex; ++ UCHAR ucDAC2_CV_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low 
++ UCHAR ucDAC2_PAL_BG_Adjustment; ++ UCHAR ucDAC2_PAL_DAC_Adjustment; ++ USHORT usDAC2_TV2_FORCE_Data; ++}COMPASSIONATE_DATA; ++ ++/****************************Supported Device Info Table Definitions**********************/ ++// ucConnectInfo: ++// [7:4] - connector type ++// = 1 - VGA connector ++// = 2 - DVI-I ++// = 3 - DVI-D ++// = 4 - DVI-A ++// = 5 - SVIDEO ++// = 6 - COMPOSITE ++// = 7 - LVDS ++// = 8 - DIGITAL LINK ++// = 9 - SCART ++// = 0xA - HDMI_type A ++// = 0xB - HDMI_type B ++// = 0xE - Special case1 (DVI+DIN) ++// Others=TBD ++// [3:0] - DAC Associated ++// = 0 - no DAC ++// = 1 - DACA ++// = 2 - DACB ++// = 3 - External DAC ++// Others=TBD ++// ++ ++typedef struct _ATOM_CONNECTOR_INFO ++{ ++#if ATOM_BIG_ENDIAN ++ UCHAR bfConnectorType:4; ++ UCHAR bfAssociatedDAC:4; ++#else ++ UCHAR bfAssociatedDAC:4; ++ UCHAR bfConnectorType:4; ++#endif ++}ATOM_CONNECTOR_INFO; ++ ++typedef union _ATOM_CONNECTOR_INFO_ACCESS ++{ ++ ATOM_CONNECTOR_INFO sbfAccess; ++ UCHAR ucAccess; ++}ATOM_CONNECTOR_INFO_ACCESS; ++ ++typedef struct _ATOM_CONNECTOR_INFO_I2C ++{ ++ ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo; ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; ++}ATOM_CONNECTOR_INFO_I2C; ++ ++ ++typedef struct _ATOM_SUPPORTED_DEVICES_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usDeviceSupport; ++ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO]; ++}ATOM_SUPPORTED_DEVICES_INFO; ++ ++#define NO_INT_SRC_MAPPED 0xFF ++ ++typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP ++{ ++ UCHAR ucIntSrcBitmap; ++}ATOM_CONNECTOR_INC_SRC_BITMAP; ++ ++typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usDeviceSupport; ++ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; ++ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; ++}ATOM_SUPPORTED_DEVICES_INFO_2; ++ ++typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usDeviceSupport; ++ 
ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE]; ++ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE]; ++}ATOM_SUPPORTED_DEVICES_INFO_2d1; ++ ++#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1 ++ ++ ++ ++typedef struct _ATOM_MISC_CONTROL_INFO ++{ ++ USHORT usFrequency; ++ UCHAR ucPLL_ChargePump; // PLL charge-pump gain control ++ UCHAR ucPLL_DutyCycle; // PLL duty cycle control ++ UCHAR ucPLL_VCO_Gain; // PLL VCO gain control ++ UCHAR ucPLL_VoltageSwing; // PLL driver voltage swing control ++}ATOM_MISC_CONTROL_INFO; ++ ++ ++#define ATOM_MAX_MISC_INFO 4 ++ ++typedef struct _ATOM_TMDS_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usMaxFrequency; // in 10Khz ++ ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO]; ++}ATOM_TMDS_INFO; ++ ++ ++typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE ++{ ++ UCHAR ucTVStandard; //Same as TV standards defined above, ++ UCHAR ucPadding[1]; ++}ATOM_ENCODER_ANALOG_ATTRIBUTE; ++ ++typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE ++{ ++ UCHAR ucAttribute; //Same as other digital encoder attributes defined above ++ UCHAR ucPadding[1]; ++}ATOM_ENCODER_DIGITAL_ATTRIBUTE; ++ ++typedef union _ATOM_ENCODER_ATTRIBUTE ++{ ++ ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib; ++ ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib; ++}ATOM_ENCODER_ATTRIBUTE; ++ ++ ++typedef struct _DVO_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; ++ USHORT usEncoderID; ++ UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only. 
++ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT ++ ATOM_ENCODER_ATTRIBUTE usDevAttr; ++}DVO_ENCODER_CONTROL_PARAMETERS; ++ ++typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION ++{ ++ DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion ++}DVO_ENCODER_CONTROL_PS_ALLOCATION; ++ ++ ++#define ATOM_XTMDS_ASIC_SI164_ID 1 ++#define ATOM_XTMDS_ASIC_SI178_ID 2 ++#define ATOM_XTMDS_ASIC_TFP513_ID 3 ++#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001 ++#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002 ++#define ATOM_XTMDS_MVPU_FPGA 0x00000004 ++ ++ ++typedef struct _ATOM_XTMDS_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usSingleLinkMaxFrequency; ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //Point the ID on which I2C is used to control external chip ++ UCHAR ucXtransimitterID; ++ UCHAR ucSupportedLink; // Bit field, bit0=1, single link supported;bit1=1,dual link supported ++ UCHAR ucSequnceAlterID; // Even with the same external TMDS asic, it's possible that the program seqence alters ++ // due to design. This ID is used to alert driver that the sequence is not "standard"! ++ UCHAR ucMasterAddress; // Address to control Master xTMDS Chip ++ UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip ++}ATOM_XTMDS_INFO; ++ ++typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS ++{ ++ UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off ++ UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX.... 
++ UCHAR ucPadding[2]; ++}DFP_DPMS_STATUS_CHANGE_PARAMETERS; ++ ++/****************************Legacy Power Play Table Definitions **********************/ ++ ++//Definitions for ulPowerPlayMiscInfo ++#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L ++#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L ++#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L ++ ++#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT 0x00000004L ++#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH 0x00000008L ++ ++#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN 0x00000010L ++ ++#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L ++#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L ++#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program ++ ++#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L ++#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L ++#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L ++#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN 0x00000800L ++#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE 0x00001000L ++#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L ++#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L ++ ++#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L ++#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L ++#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L ++#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L ++#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L ++ ++#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved ++#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20 ++ ++#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L ++#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L ++#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L ++#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When 
set, Dynamic ++#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, Dynamic ++#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, This mode is for acceleated 3D mode ++ ++#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) ++#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28 ++#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L ++ ++#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE 0x00000001L ++#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT 0x00000002L ++#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN 0x00000004L ++#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L ++#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L ++#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L ++#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. 
++ //If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback ++#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L ++#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L ++#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L ++ ++//ucTableFormatRevision=1 ++//ucTableContentRevision=1 ++typedef struct _ATOM_POWERMODE_INFO ++{ ++ ULONG ulMiscInfo; //The power level should be arranged in ascending order ++ ULONG ulReserved1; // must set to 0 ++ ULONG ulReserved2; // must set to 0 ++ USHORT usEngineClock; ++ USHORT usMemoryClock; ++ UCHAR ucVoltageDropIndex; // index to GPIO table ++ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate ++ UCHAR ucMinTemperature; ++ UCHAR ucMaxTemperature; ++ UCHAR ucNumPciELanes; // number of PCIE lanes ++}ATOM_POWERMODE_INFO; ++ ++//ucTableFormatRevision=2 ++//ucTableContentRevision=1 ++typedef struct _ATOM_POWERMODE_INFO_V2 ++{ ++ ULONG ulMiscInfo; //The power level should be arranged in ascending order ++ ULONG ulMiscInfo2; ++ ULONG ulEngineClock; ++ ULONG ulMemoryClock; ++ UCHAR ucVoltageDropIndex; // index to GPIO table ++ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate ++ UCHAR ucMinTemperature; ++ UCHAR ucMaxTemperature; ++ UCHAR ucNumPciELanes; // number of PCIE lanes ++}ATOM_POWERMODE_INFO_V2; ++ ++//ucTableFormatRevision=2 ++//ucTableContentRevision=2 ++typedef struct _ATOM_POWERMODE_INFO_V3 ++{ ++ ULONG ulMiscInfo; //The power level should be arranged in ascending order ++ ULONG ulMiscInfo2; ++ ULONG ulEngineClock; ++ ULONG ulMemoryClock; ++ UCHAR ucVoltageDropIndex; // index to Core (VDDC) votage table ++ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate ++ UCHAR ucMinTemperature; ++ UCHAR ucMaxTemperature; ++ UCHAR ucNumPciELanes; // number of PCIE lanes ++ UCHAR ucVDDCI_VoltageDropIndex; // index to VDDCI votage table ++}ATOM_POWERMODE_INFO_V3; ++ ++ ++#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8 ++ ++#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN 0x01 ++#define 
ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE 0x02 ++ ++#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63 0x01 ++#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032 0x02 ++#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030 0x03 ++#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04 ++#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05 ++#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06 ++#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog ++ ++ ++typedef struct _ATOM_POWERPLAY_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucOverdriveThermalController; ++ UCHAR ucOverdriveI2cLine; ++ UCHAR ucOverdriveIntBitmap; ++ UCHAR ucOverdriveControllerAddress; ++ UCHAR ucSizeOfPowerModeEntry; ++ UCHAR ucNumOfPowerModeEntries; ++ ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; ++}ATOM_POWERPLAY_INFO; ++ ++typedef struct _ATOM_POWERPLAY_INFO_V2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucOverdriveThermalController; ++ UCHAR ucOverdriveI2cLine; ++ UCHAR ucOverdriveIntBitmap; ++ UCHAR ucOverdriveControllerAddress; ++ UCHAR ucSizeOfPowerModeEntry; ++ UCHAR ucNumOfPowerModeEntries; ++ ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; ++}ATOM_POWERPLAY_INFO_V2; ++ ++typedef struct _ATOM_POWERPLAY_INFO_V3 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucOverdriveThermalController; ++ UCHAR ucOverdriveI2cLine; ++ UCHAR ucOverdriveIntBitmap; ++ UCHAR ucOverdriveControllerAddress; ++ UCHAR ucSizeOfPowerModeEntry; ++ UCHAR ucNumOfPowerModeEntries; ++ ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; ++}ATOM_POWERPLAY_INFO_V3; ++ ++ ++ ++/**************************************************************************/ ++ ++ ++// Following definitions are for compatiblity issue in different SW components. 
++#define ATOM_MASTER_DATA_TABLE_REVISION 0x01 ++#define Object_Info Object_Header ++#define AdjustARB_SEQ MC_InitParameter ++#define VRAM_GPIO_DetectionInfo VoltageObjectInfo ++#define ASIC_VDDCI_Info ASIC_ProfilingInfo ++#define ASIC_MVDDQ_Info MemoryTrainingInfo ++#define SS_Info PPLL_SS_Info ++#define ASIC_MVDDC_Info ASIC_InternalSS_Info ++#define DispDevicePriorityInfo SaveRestoreInfo ++#define DispOutInfo TV_VideoMode ++ ++ ++#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE ++#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE ++ ++//New device naming, remove them when both DAL/VBIOS is ready ++#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS ++#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS ++ ++#define DFP1X_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS ++#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS ++ ++#define DFP1I_OUTPUT_CONTROL_PARAMETERS DFP1_OUTPUT_CONTROL_PARAMETERS ++#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION ++ ++#define ATOM_DEVICE_DFP1I_SUPPORT ATOM_DEVICE_DFP1_SUPPORT ++#define ATOM_DEVICE_DFP1X_SUPPORT ATOM_DEVICE_DFP2_SUPPORT ++ ++#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX ++#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX ++ ++#define ATOM_DEVICE_DFP2I_INDEX 0x00000009 ++#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX) ++ ++#define ATOM_S0_DFP1I ATOM_S0_DFP1 ++#define ATOM_S0_DFP1X ATOM_S0_DFP2 ++ ++#define ATOM_S0_DFP2I 0x00200000L ++#define ATOM_S0_DFP2Ib2 0x20 ++ ++#define ATOM_S2_DFP1I_DPMS_STATE ATOM_S2_DFP1_DPMS_STATE ++#define ATOM_S2_DFP1X_DPMS_STATE ATOM_S2_DFP2_DPMS_STATE ++ ++#define ATOM_S2_DFP2I_DPMS_STATE 0x02000000L ++#define ATOM_S2_DFP2I_DPMS_STATEb3 0x02 ++ ++#define ATOM_S3_DFP2I_ACTIVEb1 0x02 ++ ++#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE ++#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE ++ ++#define ATOM_S3_DFP2I_ACTIVE 0x00000200L ++ ++#define 
ATOM_S3_DFP1I_CRTC_ACTIVE ATOM_S3_DFP1_CRTC_ACTIVE ++#define ATOM_S3_DFP1X_CRTC_ACTIVE ATOM_S3_DFP2_CRTC_ACTIVE ++#define ATOM_S3_DFP2I_CRTC_ACTIVE 0x02000000L ++ ++#define ATOM_S3_DFP2I_CRTC_ACTIVEb3 0x02 ++#define ATOM_S5_DOS_REQ_DFP2Ib1 0x02 ++ ++#define ATOM_S5_DOS_REQ_DFP2I 0x0200 ++#define ATOM_S6_ACC_REQ_DFP1I ATOM_S6_ACC_REQ_DFP1 ++#define ATOM_S6_ACC_REQ_DFP1X ATOM_S6_ACC_REQ_DFP2 ++ ++#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02 ++#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L ++ ++#define TMDS1XEncoderControl DVOEncoderControl ++#define DFP1XOutputControl DVOOutputControl ++ ++#define ExternalDFPOutputControl DFP1XOutputControl ++#define EnableExternalTMDS_Encoder TMDS1XEncoderControl ++ ++#define DFP1IOutputControl TMDSAOutputControl ++#define DFP2IOutputControl LVTMAOutputControl ++ ++#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS ++#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION ++ ++#define DAC2_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS ++#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION ++ ++#define ucDac1Standard ucDacStandard ++#define ucDac2Standard ucDacStandard ++ ++#define TMDS1EncoderControl TMDSAEncoderControl ++#define TMDS2EncoderControl LVTMAEncoderControl ++ ++#define DFP1OutputControl TMDSAOutputControl ++#define DFP2OutputControl LVTMAOutputControl ++#define CRT1OutputControl DAC1OutputControl ++#define CRT2OutputControl DAC2OutputControl ++ ++//These two lines will be removed for sure in a few days, will follow up with Michael V. 
++#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL ++#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL ++ ++/*********************************************************************************/ ++ ++#pragma pack() // BIOS data must use byte aligment ++ ++#endif /* _ATOMBIOS_H */ +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +new file mode 100644 +index 0000000..a813ba9 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -0,0 +1,461 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++#include "drm_crtc_helper.h" ++#include "atom.h" ++#include "atom-bits.h" ++ ++static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int index = GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters); ++ ENABLE_CRTC_PS_ALLOCATION args; ++ ++ memset(&args, 0, sizeof(args)); ++ ++ args.ucCRTC = radeon_crtc->crtc_id; ++ args.ucEnable = lock; ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++static void atombios_enable_crtc(struct drm_crtc *crtc, int state) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC); ++ ENABLE_CRTC_PS_ALLOCATION args; ++ ++ memset(&args, 0, sizeof(args)); ++ ++ args.ucCRTC = radeon_crtc->crtc_id; ++ args.ucEnable = state; ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq); ++ ENABLE_CRTC_PS_ALLOCATION args; ++ ++ memset(&args, 0, sizeof(args)); ++ ++ args.ucCRTC = radeon_crtc->crtc_id; ++ args.ucEnable = state; ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++static void atombios_blank_crtc(struct drm_crtc *crtc, int state) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct 
drm_radeon_private *dev_priv = dev->dev_private; ++ int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC); ++ BLANK_CRTC_PS_ALLOCATION args; ++ ++ memset(&args, 0, sizeof(args)); ++ ++ args.ucCRTC = radeon_crtc->crtc_id; ++ args.ucBlanking = state; ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ if (radeon_is_dce3(dev_priv)) ++ atombios_enable_crtc_memreq(crtc, 1); ++ atombios_enable_crtc(crtc, 1); ++ atombios_blank_crtc(crtc, 0); ++ ++ radeon_crtc_load_lut(crtc); ++ break; ++ case DRM_MODE_DPMS_OFF: ++ atombios_blank_crtc(crtc, 1); ++ atombios_enable_crtc(crtc, 0); ++ if (radeon_is_dce3(dev_priv)) ++ atombios_enable_crtc_memreq(crtc, 0); ++ break; ++ } ++} ++ ++static void ++atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, SET_CRTC_USING_DTD_TIMING_PARAMETERS *crtc_param) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param; ++ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); ++ ++ conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size); ++ conv_param.usH_Blanking_Time = cpu_to_le16(crtc_param->usH_Blanking_Time); ++ conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size); ++ conv_param.usV_Blanking_Time = cpu_to_le16(crtc_param->usV_Blanking_Time); ++ conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset); ++ conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth); ++ conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset); ++ conv_param.usV_SyncWidth = 
cpu_to_le16(crtc_param->usV_SyncWidth); ++ conv_param.susModeMiscInfo.usAccess = cpu_to_le16(crtc_param->susModeMiscInfo.usAccess); ++ conv_param.ucCRTC = crtc_param->ucCRTC; ++ ++ printk("executing set crtc dtd timing\n"); ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&conv_param); ++} ++ ++void atombios_crtc_set_timing(struct drm_crtc *crtc, SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_param) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param; ++ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing); ++ ++ conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total); ++ conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp); ++ conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart); ++ conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth); ++ conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total); ++ conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp); ++ conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart); ++ conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth); ++ conv_param.susModeMiscInfo.usAccess = cpu_to_le16(crtc_param->susModeMiscInfo.usAccess); ++ conv_param.ucCRTC = crtc_param->ucCRTC; ++ conv_param.ucOverscanRight = crtc_param->ucOverscanRight; ++ conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft; ++ conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom; ++ conv_param.ucOverscanTop = crtc_param->ucOverscanTop; ++ conv_param.ucReserved = crtc_param->ucReserved; ++ ++ printk("executing set crtc timing\n"); ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&conv_param); ++} ++ ++void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct 
drm_radeon_private *dev_priv = dev->dev_private; ++ uint8_t frev, crev; ++ int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); ++ SET_PIXEL_CLOCK_PS_ALLOCATION spc_param; ++ PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr; ++ PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr; ++ uint32_t sclock = mode->clock; ++ uint32_t ref_div = 0, fb_div = 0, post_div = 0; ++ struct radeon_pll *pll; ++ int pll_flags = 0; ++ ++ memset(&spc_param, 0, sizeof(SET_PIXEL_CLOCK_PS_ALLOCATION)); ++ ++ if (!radeon_is_avivo(dev_priv)) ++ pll_flags |= RADEON_PLL_LEGACY; ++ ++ if (mode->clock > 200000) /* range limits??? */ ++ pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; ++ else ++ pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; ++ ++ if (radeon_crtc->crtc_id == 0) ++ pll = &dev_priv->mode_info.p1pll; ++ else ++ pll = &dev_priv->mode_info.p2pll; ++ ++ radeon_compute_pll(pll, mode->clock, &sclock, ++ &fb_div, &ref_div, &post_div, pll_flags); ++ ++ if (radeon_is_avivo(dev_priv)) { ++ uint32_t ss_cntl; ++ if (radeon_crtc->crtc_id == 0) { ++ ss_cntl = RADEON_READ(AVIVO_P1PLL_INT_SS_CNTL); ++ RADEON_WRITE(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1); ++ } else { ++ ss_cntl = RADEON_READ(AVIVO_P2PLL_INT_SS_CNTL); ++ RADEON_WRITE(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1); ++ } ++ } ++ ++ /* */ ++ ++ atom_parse_cmd_header(dev_priv->mode_info.atom_context, index, &frev, &crev); ++ ++ switch(frev) { ++ case 1: ++ switch(crev) { ++ case 1: ++ case 2: ++ spc2_ptr = (PIXEL_CLOCK_PARAMETERS_V2*)&spc_param.sPCLKInput; ++ spc2_ptr->usPixelClock = cpu_to_le16(sclock); ++ spc2_ptr->usRefDiv = cpu_to_le16(ref_div); ++ spc2_ptr->usFbDiv = cpu_to_le16(fb_div); ++ spc2_ptr->ucPostDiv = post_div; ++ spc2_ptr->ucPpll = radeon_crtc->crtc_id ? 
ATOM_PPLL2 : ATOM_PPLL1; ++ spc2_ptr->ucCRTC = radeon_crtc->crtc_id; ++ spc2_ptr->ucRefDivSrc = 1; ++ break; ++ case 3: ++ spc3_ptr = (PIXEL_CLOCK_PARAMETERS_V3*)&spc_param.sPCLKInput; ++ spc3_ptr->usPixelClock = cpu_to_le16(sclock); ++ spc3_ptr->usRefDiv = cpu_to_le16(ref_div); ++ spc3_ptr->usFbDiv = cpu_to_le16(fb_div); ++ spc3_ptr->ucPostDiv = post_div; ++ spc3_ptr->ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; ++ spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2); ++ ++ /* TODO insert output encoder object stuff herre for r600 */ ++ break; ++ default: ++ DRM_ERROR("Unknown table version %d %d\n", frev, crev); ++ return; ++ } ++ break; ++ default: ++ DRM_ERROR("Unknown table version %d %d\n", frev, crev); ++ return; ++ } ++ ++ printk("executing set pll\n"); ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&spc_param); ++} ++ ++void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_framebuffer *radeon_fb; ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ uint32_t fb_location, fb_format, fb_pitch_pixels; ++ ++ if (!crtc->fb) ++ return; ++ ++ radeon_fb = to_radeon_framebuffer(crtc->fb); ++ ++ obj = radeon_fb->obj; ++ obj_priv = obj->driver_private; ++ ++ fb_location = obj_priv->bo->offset + dev_priv->fb_location; ++ ++ switch(crtc->fb->bits_per_pixel) { ++ case 15: ++ fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555; ++ break; ++ case 16: ++ fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | AVIVO_D1GRPH_CONTROL_16BPP_RGB565; ++ break; ++ case 24: ++ case 32: ++ fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; ++ break; ++ default: ++ DRM_ERROR("Unsupported screen depth %d\n", crtc->fb->bits_per_pixel); ++ return; ++ } ++ ++ /* TODO tiling */ ++ if 
(radeon_crtc->crtc_id == 0) ++ RADEON_WRITE(AVIVO_D1VGA_CONTROL, 0); ++ else ++ RADEON_WRITE(AVIVO_D2VGA_CONTROL, 0); ++ ++ RADEON_WRITE(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, fb_location); ++ RADEON_WRITE(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, fb_location); ++ RADEON_WRITE(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); ++ ++ RADEON_WRITE(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); ++ RADEON_WRITE(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); ++ RADEON_WRITE(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, x); ++ RADEON_WRITE(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, y); ++ RADEON_WRITE(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, x + crtc->mode.hdisplay); ++ RADEON_WRITE(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, y + crtc->mode.vdisplay); ++ ++ fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); ++ RADEON_WRITE(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); ++ RADEON_WRITE(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); ++ ++ RADEON_WRITE(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, ++ crtc->mode.vdisplay); ++ RADEON_WRITE(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, (x << 16) | y); ++ RADEON_WRITE(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, ++ (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); ++ ++ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ++ RADEON_WRITE(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, ++ AVIVO_D1MODE_INTERLEAVE_EN); ++ else ++ RADEON_WRITE(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, ++ 0); ++} ++ ++void atombios_crtc_mode_set(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode, ++ int x, int y) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct drm_encoder *encoder; ++ 
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing; ++ ++ /* TODO color tiling */ ++ memset(&crtc_timing, 0, sizeof(crtc_timing)); ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ ++ ++ ++ } ++ ++ crtc_timing.ucCRTC = radeon_crtc->crtc_id; ++ crtc_timing.usH_Total = adjusted_mode->crtc_htotal; ++ crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay; ++ crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start; ++ crtc_timing.usH_SyncWidth = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; ++ ++ crtc_timing.usV_Total = adjusted_mode->crtc_vtotal; ++ crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay; ++ crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start; ++ crtc_timing.usV_SyncWidth = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ++ crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ++ crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC) ++ crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ++ crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE; ++ ++ atombios_crtc_set_pll(crtc, adjusted_mode); ++ atombios_crtc_set_timing(crtc, &crtc_timing); ++ ++ if (radeon_is_avivo(dev_priv)) ++ atombios_crtc_set_base(crtc, x, y); ++ else { ++ if (radeon_crtc->crtc_id == 0) { ++ SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing; ++ memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing)); ++ ++ /* setup FP shadow regs on R4xx */ ++ crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id; ++ crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay; ++ crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay; ++ crtc_dtd_timing.usH_Blanking_Time = 
adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hdisplay; ++ crtc_dtd_timing.usV_Blanking_Time = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vdisplay; ++ crtc_dtd_timing.usH_SyncOffset = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_hdisplay; ++ crtc_dtd_timing.usV_SyncOffset = adjusted_mode->crtc_vsync_start - adjusted_mode->crtc_vdisplay; ++ crtc_dtd_timing.usH_SyncWidth = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; ++ crtc_dtd_timing.usV_SyncWidth = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; ++ //crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; ++ //crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ++ crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ++ crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC) ++ crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ++ crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE; ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE; ++ ++ atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing); ++ } ++ radeon_crtc_set_base(crtc, x, y); ++ radeon_legacy_atom_set_surface(crtc); ++ } ++ ++} ++ ++static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ return true; ++} ++ ++ ++static void atombios_crtc_prepare(struct drm_crtc *crtc) ++{ ++ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); ++ atombios_lock_crtc(crtc, 1); ++} ++ ++static void atombios_crtc_commit(struct drm_crtc *crtc) ++{ ++ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); ++ atombios_lock_crtc(crtc, 0); ++} ++ ++static const struct drm_crtc_helper_funcs atombios_helper_funcs = 
{ ++ .dpms = atombios_crtc_dpms, ++ .mode_fixup = atombios_crtc_mode_fixup, ++ .mode_set = atombios_crtc_mode_set, ++ .mode_set_base = atombios_crtc_set_base, ++ .prepare = atombios_crtc_prepare, ++ .commit = atombios_crtc_commit, ++}; ++ ++void radeon_atombios_init_crtc(struct drm_device *dev, ++ struct radeon_crtc *radeon_crtc) ++{ ++ if (radeon_crtc->crtc_id == 1) ++ radeon_crtc->crtc_offset = AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; ++ drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); ++} +diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c +index cace396..d2b4c4f 100644 +--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c ++++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c +@@ -35,6 +35,7 @@ + #include "drm.h" + #include "radeon_drm.h" + #include "radeon_drv.h" ++#include "radeon_reg.h" + #include "r300_reg.h" + + #define R300_SIMULTANEOUS_CLIPRECTS 4 +@@ -166,8 +167,6 @@ void r300_init_reg_flags(struct drm_device *dev) + for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\ + r300_reg_flags[i]|=(mark); + +-#define MARK_SAFE 1 +-#define MARK_CHECK_OFFSET 2 + + #define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE) + +@@ -205,7 +204,7 @@ void r300_init_reg_flags(struct drm_device *dev) + ADD_RANGE(0x42C0, 2); + ADD_RANGE(R300_RS_CNTL_0, 2); + +- ADD_RANGE(R300_SC_HYPERZ, 2); ++ ADD_RANGE(0x43A4, 2); + ADD_RANGE(0x43E8, 1); + + ADD_RANGE(0x46A4, 5); +@@ -224,12 +223,14 @@ void r300_init_reg_flags(struct drm_device *dev) + ADD_RANGE(0x4E50, 9); + ADD_RANGE(0x4E88, 1); + ADD_RANGE(0x4EA0, 2); +- ADD_RANGE(R300_ZB_CNTL, 3); +- ADD_RANGE(R300_ZB_FORMAT, 4); +- ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */ +- ADD_RANGE(R300_ZB_DEPTHPITCH, 1); +- ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1); +- ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13); ++ ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3); ++ ADD_RANGE(R300_RB3D_ZSTENCIL_FORMAT, 4); ++ ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */ ++ 
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1); ++ ADD_RANGE(0x4F28, 1); ++ ADD_RANGE(0x4F30, 2); ++ ADD_RANGE(0x4F44, 1); ++ ADD_RANGE(0x4F54, 1); + + ADD_RANGE(R300_TX_FILTER_0, 16); + ADD_RANGE(R300_TX_FILTER1_0, 16); +@@ -242,11 +243,16 @@ void r300_init_reg_flags(struct drm_device *dev) + ADD_RANGE(R300_TX_BORDER_COLOR_0, 16); + + /* Sporadic registers used as primitives are emitted */ +- ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1); ++ ADD_RANGE(R300_RB3D_ZCACHE_CTLSTAT, 1); + ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1); + ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8); + ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8); + ++ ADD_RANGE(R500_SU_REG_DEST, 1); ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { ++ ADD_RANGE(R300_DST_PIPE_CONFIG, 1); ++ } ++ + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { + ADD_RANGE(R500_VAP_INDEX_OFFSET, 1); + ADD_RANGE(R500_US_CONFIG, 2); +@@ -256,7 +262,8 @@ void r300_init_reg_flags(struct drm_device *dev) + ADD_RANGE(R500_RS_INST_0, 16); + ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2); + ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2); +- ADD_RANGE(R500_ZB_FIFO_SIZE, 2); ++// ADD_RANGE(R500_ZB_FIFO_SIZE 2); ++ ADD_RANGE(R500_GA_US_VECTOR_INDEX, 2); + } else { + ADD_RANGE(R300_PFS_CNTL_0, 3); + ADD_RANGE(R300_PFS_NODE_0, 4); +@@ -269,9 +276,113 @@ void r300_init_reg_flags(struct drm_device *dev) + ADD_RANGE(R300_RS_ROUTE_0, 8); + + } ++ ++ /* add 2d blit engine registers for DDX */ ++ ADD_RANGE(RADEON_SRC_Y_X, 3); /* 1434, 1438, 143c, ++ SRC_Y_X, DST_Y_X, DST_HEIGHT_WIDTH ++ */ ++ ADD_RANGE(RADEON_DP_GUI_MASTER_CNTL, 1); /* 146c */ ++ ADD_RANGE(RADEON_DP_BRUSH_BKGD_CLR, 2); /* 1478, 147c */ ++ ADD_RANGE(RADEON_DP_SRC_FRGD_CLR, 2); /* 15d8, 15dc */ ++ ADD_RANGE(RADEON_DP_CNTL, 1); /* 16c0 */ ++ ADD_RANGE(RADEON_DP_WRITE_MASK, 1); /* 16cc */ ++ ADD_RANGE(RADEON_DEFAULT_SC_BOTTOM_RIGHT, 1); /* 16e8 */ ++ ++ ADD_RANGE(RADEON_DSTCACHE_CTLSTAT, 1); ++ ADD_RANGE(RADEON_WAIT_UNTIL, 1); ++ ++ ADD_RANGE_MARK(RADEON_DST_OFFSET, 1, MARK_CHECK_OFFSET); ++ 
ADD_RANGE_MARK(RADEON_SRC_OFFSET, 1, MARK_CHECK_OFFSET); ++ ++ ADD_RANGE_MARK(RADEON_DST_PITCH_OFFSET, 1, MARK_CHECK_OFFSET); ++ ADD_RANGE_MARK(RADEON_SRC_PITCH_OFFSET, 1, MARK_CHECK_OFFSET); ++ ++ /* TODO SCISSOR */ ++ ADD_RANGE_MARK(R300_SC_SCISSOR0, 2, MARK_CHECK_SCISSOR); ++ ++ ADD_RANGE(R300_SC_CLIP_0_A, 2); ++ ADD_RANGE(R300_SC_CLIP_RULE, 1); ++ ADD_RANGE(R300_SC_SCREENDOOR, 1); ++ ++ ADD_RANGE(R300_VAP_PVS_CODE_CNTL_0, 4); ++ ADD_RANGE(R300_VAP_PVS_VECTOR_INDX_REG, 2); ++ ADD_RANGE(R300_VAP_PVS_UPLOAD_DATA, 1); ++ ++ if (dev_priv->chip_family <= CHIP_RV280) { ++ ADD_RANGE(RADEON_RE_TOP_LEFT, 1); ++ ADD_RANGE(RADEON_RE_WIDTH_HEIGHT, 1); ++ ADD_RANGE(RADEON_AUX_SC_CNTL, 1); ++ ADD_RANGE(RADEON_RB3D_DSTCACHE_CTLSTAT, 1); ++ ADD_RANGE(RADEON_RB3D_PLANEMASK, 1); ++ ADD_RANGE(RADEON_SE_CNTL, 1); ++ ADD_RANGE(RADEON_PP_CNTL, 1); ++ ADD_RANGE(RADEON_RB3D_CNTL, 1); ++ ADD_RANGE_MARK(RADEON_RB3D_COLOROFFSET, 1, MARK_CHECK_OFFSET); ++ ADD_RANGE(RADEON_RB3D_COLORPITCH, 1); ++ ADD_RANGE(RADEON_RB3D_BLENDCNTL, 1); ++ ++ if (dev_priv->chip_family >= CHIP_R200) { ++ ADD_RANGE(R200_PP_CNTL_X, 1); ++ ADD_RANGE(R200_PP_TXMULTI_CTL_0, 1); ++ ADD_RANGE(R200_SE_VTX_STATE_CNTL, 1); ++ ADD_RANGE(R200_RE_CNTL, 1); ++ ADD_RANGE(R200_SE_VTE_CNTL, 1); ++ ADD_RANGE(R200_SE_VAP_CNTL, 1); ++ ++ ADD_RANGE(R200_PP_TXFILTER_0, 1); ++ ADD_RANGE(R200_PP_TXFORMAT_0, 1); ++ ADD_RANGE(R200_PP_TXFORMAT_X_0, 1); ++ ADD_RANGE(R200_PP_TXSIZE_0, 1); ++ ADD_RANGE(R200_PP_TXPITCH_0, 1); ++ ADD_RANGE(R200_PP_TFACTOR_0, 1); ++ ++ ADD_RANGE(R200_PP_TXFILTER_1, 1); ++ ADD_RANGE(R200_PP_TXFORMAT_1, 1); ++ ADD_RANGE(R200_PP_TXFORMAT_X_1, 1); ++ ADD_RANGE(R200_PP_TXSIZE_1, 1); ++ ADD_RANGE(R200_PP_TXPITCH_1, 1); ++ ADD_RANGE(R200_PP_TFACTOR_1, 1); ++ ++ ADD_RANGE_MARK(R200_PP_TXOFFSET_0, 1, MARK_CHECK_OFFSET); ++ ADD_RANGE_MARK(R200_PP_TXOFFSET_1, 1, MARK_CHECK_OFFSET); ++ ADD_RANGE_MARK(R200_PP_TXOFFSET_2, 1, MARK_CHECK_OFFSET); ++ ADD_RANGE_MARK(R200_PP_TXOFFSET_3, 1, MARK_CHECK_OFFSET); ++ 
ADD_RANGE_MARK(R200_PP_TXOFFSET_4, 1, MARK_CHECK_OFFSET); ++ ADD_RANGE_MARK(R200_PP_TXOFFSET_5, 1, MARK_CHECK_OFFSET); ++ ++ ADD_RANGE(R200_SE_VTX_FMT_0, 1); ++ ADD_RANGE(R200_SE_VTX_FMT_1, 1); ++ ADD_RANGE(R200_PP_TXCBLEND_0, 1); ++ ADD_RANGE(R200_PP_TXCBLEND2_0, 1); ++ ADD_RANGE(R200_PP_TXABLEND_0, 1); ++ ADD_RANGE(R200_PP_TXABLEND2_0, 1); ++ ++ } else { ++ ++ ADD_RANGE(RADEON_SE_COORD_FMT, 1); ++ ADD_RANGE(RADEON_SE_CNTL_STATUS, 1); ++ ++ ADD_RANGE(RADEON_PP_TXFILTER_0, 1); ++ ADD_RANGE(RADEON_PP_TXFORMAT_0, 1); ++ ADD_RANGE(RADEON_PP_TEX_SIZE_0, 1); ++ ADD_RANGE(RADEON_PP_TEX_PITCH_0, 1); ++ ++ ADD_RANGE(RADEON_PP_TXFILTER_1, 1); ++ ADD_RANGE(RADEON_PP_TXFORMAT_1, 1); ++ ADD_RANGE(RADEON_PP_TEX_SIZE_1, 1); ++ ADD_RANGE(RADEON_PP_TEX_PITCH_1, 1); ++ ++ ADD_RANGE(RADEON_PP_TXCBLEND_0, 1); ++ ADD_RANGE(RADEON_PP_TXABLEND_0, 1); ++ ADD_RANGE(RADEON_SE_VTX_FMT, 1); ++ ADD_RANGE_MARK(RADEON_PP_TXOFFSET_0, 1, MARK_CHECK_OFFSET); ++ ADD_RANGE_MARK(RADEON_PP_TXOFFSET_1, 1, MARK_CHECK_OFFSET); ++ ADD_RANGE_MARK(RADEON_PP_TXOFFSET_2, 1, MARK_CHECK_OFFSET); ++ } ++ } + } + +-static __inline__ int r300_check_range(unsigned reg, int count) ++int r300_check_range(unsigned reg, int count) + { + int i; + if (reg & ~0xffff) +@@ -282,6 +393,13 @@ static __inline__ int r300_check_range(unsigned reg, int count) + return 0; + } + ++int r300_get_reg_flags(unsigned reg) ++{ ++ if (reg & ~0xffff) ++ return -1; ++ return r300_reg_flags[(reg >> 2)]; ++} ++ + static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * + dev_priv, + drm_radeon_kcmd_buffer_t +diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h +index ee6f811..12f4abb 100644 +--- a/drivers/gpu/drm/radeon/r300_reg.h ++++ b/drivers/gpu/drm/radeon/r300_reg.h +@@ -126,15 +126,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ /* END: Wild guesses */ + + #define R300_SE_VTE_CNTL 0x20b0 +-# define R300_VPORT_X_SCALE_ENA 0x00000001 +-# define R300_VPORT_X_OFFSET_ENA 0x00000002 +-# define R300_VPORT_Y_SCALE_ENA 0x00000004 +-# define R300_VPORT_Y_OFFSET_ENA 0x00000008 +-# define R300_VPORT_Z_SCALE_ENA 0x00000010 +-# define R300_VPORT_Z_OFFSET_ENA 0x00000020 +-# define R300_VTX_XY_FMT 0x00000100 +-# define R300_VTX_Z_FMT 0x00000200 +-# define R300_VTX_W0_FMT 0x00000400 + # define R300_VTX_W0_NORMALIZE 0x00000800 + # define R300_VTX_ST_DENORMALIZED 0x00001000 + +@@ -490,7 +481,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. + # define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */ + # define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24 + +-#define R300_GB_SELECT 0x401C ++ + # define R300_GB_FOG_SELECT_C0A 0 + # define R300_GB_FOG_SELECT_C1A 1 + # define R300_GB_FOG_SELECT_C2A 2 +@@ -702,27 +693,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. + # define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11) + /* END: Rasterization / Interpolators - many guesses */ + +-/* Hierarchical Z Enable */ +-#define R300_SC_HYPERZ 0x43a4 +-# define R300_SC_HYPERZ_DISABLE (0 << 0) +-# define R300_SC_HYPERZ_ENABLE (1 << 0) +-# define R300_SC_HYPERZ_MIN (0 << 1) +-# define R300_SC_HYPERZ_MAX (1 << 1) +-# define R300_SC_HYPERZ_ADJ_256 (0 << 2) +-# define R300_SC_HYPERZ_ADJ_128 (1 << 2) +-# define R300_SC_HYPERZ_ADJ_64 (2 << 2) +-# define R300_SC_HYPERZ_ADJ_32 (3 << 2) +-# define R300_SC_HYPERZ_ADJ_16 (4 << 2) +-# define R300_SC_HYPERZ_ADJ_8 (5 << 2) +-# define R300_SC_HYPERZ_ADJ_4 (6 << 2) +-# define R300_SC_HYPERZ_ADJ_2 (7 << 2) +-# define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5) +-# define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5) +-# define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6) +-# define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6) +- +-#define R300_SC_EDGERULE 0x43a8 +- + /* BEGIN: Scissors and cliprects */ + + /* There are four clipping rectangles. Their corner coordinates are inclusive. +@@ -952,7 +922,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ /* 32 bit chroma key */ + #define R300_TX_CHROMA_KEY_0 0x4580 + /* ff00ff00 == { 0, 1.0, 0, 1.0 } */ +-#define R300_TX_BORDER_COLOR_0 0x45C0 + + /* END: Texture specification */ + +@@ -1337,7 +1306,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. + + /* gap */ + +-#define R300_RB3D_COLOROFFSET0 0x4E28 + # define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */ + #define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */ + #define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */ +@@ -1349,7 +1317,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. + * Bit 17: 4x2 tiles + * Bit 18: Extremely weird tile like, but some pixels duplicated? + */ +-#define R300_RB3D_COLORPITCH0 0x4E38 + # define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ + # define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ + # define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ +@@ -1362,7 +1329,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. + #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ + #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ + +-#define R300_RB3D_AARESOLVE_CTL 0x4E88 ++//#define R300_RB3D_AARESOLVE_CTL 0x4E88 + /* gap */ + + /* Guess by Vladimir. +@@ -1377,14 +1344,19 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. + * for this. + * Bit (1<<8) is the "test" bit. 
so plain write is 6 - vd + */ +-#define R300_ZB_CNTL 0x4F00 +-# define R300_STENCIL_ENABLE (1 << 0) +-# define R300_Z_ENABLE (1 << 1) +-# define R300_Z_WRITE_ENABLE (1 << 2) +-# define R300_Z_SIGNED_COMPARE (1 << 3) +-# define R300_STENCIL_FRONT_BACK (1 << 4) +- +-#define R300_ZB_ZSTENCILCNTL 0x4f04 ++#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00 ++# define R300_RB3D_Z_DISABLED_1 0x00000010 ++# define R300_RB3D_Z_DISABLED_2 0x00000014 ++# define R300_RB3D_Z_TEST 0x00000012 ++# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016 ++# define R300_RB3D_Z_WRITE_ONLY 0x00000006 ++ ++# define R300_RB3D_Z_TEST 0x00000012 ++# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016 ++# define R300_RB3D_Z_WRITE_ONLY 0x00000006 ++# define R300_RB3D_STENCIL_ENABLE 0x00000001 ++ ++#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04 + /* functions */ + # define R300_ZS_NEVER 0 + # define R300_ZS_LESS 1 +@@ -1404,166 +1376,52 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. + # define R300_ZS_INVERT 5 + # define R300_ZS_INCR_WRAP 6 + # define R300_ZS_DECR_WRAP 7 +-# define R300_Z_FUNC_SHIFT 0 + /* front and back refer to operations done for front + and back faces, i.e. 
separate stencil function support */ +-# define R300_S_FRONT_FUNC_SHIFT 3 +-# define R300_S_FRONT_SFAIL_OP_SHIFT 6 +-# define R300_S_FRONT_ZPASS_OP_SHIFT 9 +-# define R300_S_FRONT_ZFAIL_OP_SHIFT 12 +-# define R300_S_BACK_FUNC_SHIFT 15 +-# define R300_S_BACK_SFAIL_OP_SHIFT 18 +-# define R300_S_BACK_ZPASS_OP_SHIFT 21 +-# define R300_S_BACK_ZFAIL_OP_SHIFT 24 +- +-#define R300_ZB_STENCILREFMASK 0x4f08 +-# define R300_STENCILREF_SHIFT 0 +-# define R300_STENCILREF_MASK 0x000000ff +-# define R300_STENCILMASK_SHIFT 8 +-# define R300_STENCILMASK_MASK 0x0000ff00 +-# define R300_STENCILWRITEMASK_SHIFT 16 +-# define R300_STENCILWRITEMASK_MASK 0x00ff0000 ++# define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0 ++# define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3 ++# define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6 ++# define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9 ++# define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12 ++# define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15 ++# define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18 ++# define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21 ++# define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24 ++ ++#define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08 ++# define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0 ++# define R300_RB3D_ZS2_STENCIL_MASK 0xFF ++# define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8 ++# define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16 + + /* gap */ + +-#define R300_ZB_FORMAT 0x4f10 +-# define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0) +-# define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0) +-# define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0) +-/* reserved up to (15 << 0) */ +-# define R300_INVERT_13E3_LEADING_ONES (0 << 4) +-# define R300_INVERT_13E3_LEADING_ZEROS (1 << 4) ++#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10 ++# define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) ++# define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) ++ /* 16 bit format or some aditional bit ? 
*/ ++# define R300_DEPTH_FORMAT_UNK32 (32 << 0) + +-#define R300_ZB_ZTOP 0x4F14 +-# define R300_ZTOP_DISABLE (0 << 0) +-# define R300_ZTOP_ENABLE (1 << 0) ++#define R300_RB3D_EARLY_Z 0x4F14 ++# define R300_EARLY_Z_DISABLE (0 << 0) ++# define R300_EARLY_Z_ENABLE (1 << 0) + + /* gap */ + +-#define R300_ZB_ZCACHE_CTLSTAT 0x4f18 +-# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0) +-# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0) +-# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1) +-# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1) +-# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31) +-# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31) +- +-#define R300_ZB_BW_CNTL 0x4f1c +-# define R300_HIZ_DISABLE (0 << 0) +-# define R300_HIZ_ENABLE (1 << 0) +-# define R300_HIZ_MIN (0 << 1) +-# define R300_HIZ_MAX (1 << 1) +-# define R300_FAST_FILL_DISABLE (0 << 2) +-# define R300_FAST_FILL_ENABLE (1 << 2) +-# define R300_RD_COMP_DISABLE (0 << 3) +-# define R300_RD_COMP_ENABLE (1 << 3) +-# define R300_WR_COMP_DISABLE (0 << 4) +-# define R300_WR_COMP_ENABLE (1 << 4) +-# define R300_ZB_CB_CLEAR_RMW (0 << 5) +-# define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5) +-# define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6) +-# define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6) +- +-# define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7) +-# define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7) +-# define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8) +-# define R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8) +- +-# define R500_BMASK_ENABLE (0 << 10) +-# define R500_BMASK_DISABLE (1 << 10) +-# define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11) +-# define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11) +-# define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12) +-# define R500_HIZ_FP_EXP_BITS_1 (1 << 12) +-# define R500_HIZ_FP_EXP_BITS_2 (2 << 12) +-# define R500_HIZ_FP_EXP_BITS_3 (3 << 12) +-# define R500_HIZ_FP_EXP_BITS_4 (4 << 12) +-# define R500_HIZ_FP_EXP_BITS_5 (5 << 12) +-# define 
R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15) +-# define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15) +-# define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16) +-# define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16) +-# define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17) +-# define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17) +-# define R500_PEQ_PACKING_DISABLE (0 << 18) +-# define R500_PEQ_PACKING_ENABLE (1 << 18) +-# define R500_COVERED_PTR_MASKING_DISABLE (0 << 18) +-# define R500_COVERED_PTR_MASKING_ENABLE (1 << 18) +- ++//#define R300_RB3D_ZCACHE_CTLSTAT 0x4F18 /* GUESS */ ++# define R300_RB3D_ZCACHE_UNKNOWN_01 0x1 ++# define R300_RB3D_ZCACHE_UNKNOWN_03 0x3 + + /* gap */ + +-/* Z Buffer Address Offset. +- * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles. +- */ +-#define R300_ZB_DEPTHOFFSET 0x4f20 +- +-/* Z Buffer Pitch and Endian Control */ +-#define R300_ZB_DEPTHPITCH 0x4f24 +-# define R300_DEPTHPITCH_MASK 0x00003FFC +-# define R300_DEPTHMACROTILE_DISABLE (0 << 16) +-# define R300_DEPTHMACROTILE_ENABLE (1 << 16) +-# define R300_DEPTHMICROTILE_LINEAR (0 << 17) +-# define R300_DEPTHMICROTILE_TILED (1 << 17) +-# define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17) +-# define R300_DEPTHENDIAN_NO_SWAP (0 << 18) +-# define R300_DEPTHENDIAN_WORD_SWAP (1 << 18) +-# define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18) +-# define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18) +- +-/* Z Buffer Clear Value */ +-#define R300_ZB_DEPTHCLEARVALUE 0x4f28 +- +-#define R300_ZB_ZMASK_OFFSET 0x4f30 +-#define R300_ZB_ZMASK_PITCH 0x4f34 +-#define R300_ZB_ZMASK_WRINDEX 0x4f38 +-#define R300_ZB_ZMASK_DWORD 0x4f3c +-#define R300_ZB_ZMASK_RDINDEX 0x4f40 +- +-/* Hierarchical Z Memory Offset */ +-#define R300_ZB_HIZ_OFFSET 0x4f44 +- +-/* Hierarchical Z Write Index */ +-#define R300_ZB_HIZ_WRINDEX 0x4f48 +- +-/* Hierarchical Z Data */ +-#define R300_ZB_HIZ_DWORD 0x4f4c +- +-/* Hierarchical Z Read Index */ +-#define R300_ZB_HIZ_RDINDEX 0x4f50 +- +-/* Hierarchical Z 
Pitch */ +-#define R300_ZB_HIZ_PITCH 0x4f54 +- +-/* Z Buffer Z Pass Counter Data */ +-#define R300_ZB_ZPASS_DATA 0x4f58 +- +-/* Z Buffer Z Pass Counter Address */ +-#define R300_ZB_ZPASS_ADDR 0x4f5c +- +-/* Depth buffer X and Y coordinate offset */ +-#define R300_ZB_DEPTHXY_OFFSET 0x4f60 +-# define R300_DEPTHX_OFFSET_SHIFT 1 +-# define R300_DEPTHX_OFFSET_MASK 0x000007FE +-# define R300_DEPTHY_OFFSET_SHIFT 17 +-# define R300_DEPTHY_OFFSET_MASK 0x07FE0000 +- +-/* Sets the fifo sizes */ +-#define R500_ZB_FIFO_SIZE 0x4fd0 +-# define R500_OP_FIFO_SIZE_FULL (0 << 0) +-# define R500_OP_FIFO_SIZE_HALF (1 << 0) +-# define R500_OP_FIFO_SIZE_QUATER (2 << 0) +-# define R500_OP_FIFO_SIZE_EIGTHS (4 << 0) +- +-/* Stencil Reference Value and Mask for backfacing quads */ +-/* R300_ZB_STENCILREFMASK handles front face */ +-#define R500_ZB_STENCILREFMASK_BF 0x4fd4 +-# define R500_STENCILREF_SHIFT 0 +-# define R500_STENCILREF_MASK 0x000000ff +-# define R500_STENCILMASK_SHIFT 8 +-# define R500_STENCILMASK_MASK 0x0000ff00 +-# define R500_STENCILWRITEMASK_SHIFT 16 +-# define R500_STENCILWRITEMASK_MASK 0x00ff0000 ++#define R300_RB3D_DEPTHOFFSET 0x4F20 ++#define R300_RB3D_DEPTHPITCH 0x4F24 ++# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */ ++# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */ ++# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */ ++# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ ++# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ ++# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ + + /* BEGIN: Vertex program instruction set */ + +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +new file mode 100644 +index 0000000..c3f4f69 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -0,0 +1,700 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++#include "atom.h" ++#include "atom-bits.h" ++ ++ ++union atom_supported_devices { ++ struct _ATOM_SUPPORTED_DEVICES_INFO info; ++ struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2; ++ struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; ++}; ++ ++static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device *dev, uint8_t id) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct atom_context *ctx = dev_priv->mode_info.atom_context; ++ ATOM_GPIO_I2C_ASSIGMENT gpio; ++ struct radeon_i2c_bus_rec i2c; ++ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); ++ struct _ATOM_GPIO_I2C_INFO *i2c_info; ++ uint16_t data_offset; ++ ++ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); ++ i2c.valid = false; ++ ++ atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); ++ ++ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); ++ ++ gpio = i2c_info->asGPIO_Info[id]; ++ ++ i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4; ++ i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4; ++ i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4; ++ i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4; ++ i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4; ++ i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4; ++ i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4; ++ i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4; ++ i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift); ++ i2c.mask_data_mask = (1 << gpio.ucDataMaskShift); ++ i2c.put_clk_mask = (1 << gpio.ucClkEnShift); ++ i2c.put_data_mask = (1 << gpio.ucDataEnShift); ++ i2c.get_clk_mask = (1 << gpio.ucClkY_Shift); ++ i2c.get_data_mask = (1 << gpio.ucDataY_Shift); ++ i2c.a_clk_mask = (1 << gpio.ucClkA_Shift); ++ i2c.a_data_mask = (1 << gpio.ucDataA_Shift); ++ i2c.valid = 
true; ++ ++ return i2c; ++} ++ ++static struct radeon_i2c_bus_rec radeon_parse_i2c_record(struct drm_device *dev, ATOM_I2C_RECORD *record) ++{ ++ return radeon_lookup_gpio(dev, record->sucI2cId.bfI2C_LineMux); ++} ++ ++static void radeon_atom_apply_quirks(struct drm_device *dev, int index) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ ++ if ((dev->pdev->device == 0x791e) && ++ (dev->pdev->subsystem_vendor == 0x1043) && ++ (dev->pdev->subsystem_device == 0x826d)) { ++ if ((mode_info->bios_connector[index].connector_type == CONNECTOR_HDMI_TYPE_A) && ++ (mode_info->bios_connector[index].tmds_type == TMDS_LVTMA)) { ++ mode_info->bios_connector[index].connector_type = CONNECTOR_DVI_D; ++ } ++ } ++ ++ if ((dev->pdev->device == 0x5653) && ++ (dev->pdev->subsystem_vendor == 0x1462) && ++ (dev->pdev->subsystem_device == 0x0291)) { ++ if (mode_info->bios_connector[index].connector_type == CONNECTOR_LVDS) { ++ mode_info->bios_connector[index].ddc_i2c.valid = false; ++ } ++ } ++} ++ ++const int object_connector_convert[] = ++{ CONNECTOR_NONE, ++ CONNECTOR_DVI_I, ++ CONNECTOR_DVI_I, ++ CONNECTOR_DVI_D, ++ CONNECTOR_DVI_D, ++ CONNECTOR_VGA, ++ CONNECTOR_CTV, ++ CONNECTOR_STV, ++ CONNECTOR_NONE, ++ CONNECTOR_DIN, ++ CONNECTOR_SCART, ++ CONNECTOR_HDMI_TYPE_A, ++ CONNECTOR_HDMI_TYPE_B, ++ CONNECTOR_HDMI_TYPE_B, ++ CONNECTOR_LVDS, ++ CONNECTOR_DIN, ++ CONNECTOR_NONE, ++ CONNECTOR_NONE, ++ CONNECTOR_NONE, ++ CONNECTOR_DISPLAY_PORT, ++}; ++ ++bool radeon_get_atom_connector_info_from_bios_object_table(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct atom_context *ctx = mode_info->atom_context; ++ int index = GetIndexIntoMasterTable(DATA, Object_Header); ++ uint16_t size, data_offset; ++ uint8_t frev, crev; ++ ATOM_CONNECTOR_OBJECT_TABLE *con_obj; ++ ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj = NULL; 
++ ATOM_OBJECT_HEADER *obj_header; ++ int i, j; ++ ++ atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); ++ ++ if (data_offset == 0) ++ return false; ++ ++ if (crev < 2) ++ return false; ++ ++ obj_header = (ATOM_OBJECT_HEADER *)(ctx->bios + data_offset); ++ ++ con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)(ctx->bios + data_offset + obj_header->usConnectorObjectTableOffset); ++ DRM_ERROR("Num of objects %d\n", con_obj->ucNumberOfObjects); ++ ++ for (i = 0; i < con_obj->ucNumberOfObjects; i++) { ++ ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *src_dst_table; ++ ATOM_COMMON_RECORD_HEADER *record; ++ uint8_t obj_id, num, obj_type; ++ int record_base; ++ uint16_t con_obj_id = le16_to_cpu(con_obj->asObjects[i].usObjectID); ++ ++ obj_id = (con_obj_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; ++ num = (con_obj_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT; ++ obj_type = (con_obj_id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; ++ if (obj_type != GRAPH_OBJECT_TYPE_CONNECTOR) ++ continue; ++ ++ DRM_ERROR("offset is %04x\n", le16_to_cpu(con_obj->asObjects[i].usSrcDstTableOffset)); ++ src_dst_table = (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) ++ (ctx->bios + data_offset + le16_to_cpu(con_obj->asObjects[i].usSrcDstTableOffset)); ++ ++ DRM_ERROR("object id %04x %02x\n", obj_id, src_dst_table->ucNumberOfSrc); ++ ++ if ((dev_priv->chip_family == CHIP_RS780) && ++ (obj_id == CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) { ++ uint32_t slot_config, ct; ++ ++ // TODO ++ } else ++ mode_info->bios_connector[i].connector_type = object_connector_convert[obj_id]; ++ ++ if (mode_info->bios_connector[i].connector_type == CONNECTOR_NONE) ++ mode_info->bios_connector[i].valid = false; ++ else ++ mode_info->bios_connector[i].valid = true; ++ mode_info->bios_connector[i].devices = 0; ++ ++ for (j = 0; j < src_dst_table->ucNumberOfSrc; j++) { ++ uint8_t sobj_id; ++ ++ sobj_id = (src_dst_table->usSrcObjectID[j] & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; ++ DRM_ERROR("src object id %04x %d\n", src_dst_table->usSrcObjectID[j], 
sobj_id); ++ ++ switch(sobj_id) { ++ case ENCODER_OBJECT_ID_INTERNAL_LVDS: ++ mode_info->bios_connector[i].devices |= (1 << ATOM_DEVICE_LCD1_INDEX); ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_TMDS1: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: ++ mode_info->bios_connector[i].devices |= (1 << ATOM_DEVICE_DFP1_INDEX); ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: ++ if (num == 1) ++ mode_info->bios_connector[i].devices |= (1 << ATOM_DEVICE_DFP1_INDEX); ++ else ++ mode_info->bios_connector[i].devices |= (1 << ATOM_DEVICE_DFP2_INDEX); ++ mode_info->bios_connector[i].tmds_type = TMDS_UNIPHY; ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_TMDS2: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: ++ mode_info->bios_connector[i].devices |= (1 << ATOM_DEVICE_DFP2_INDEX); ++ mode_info->bios_connector[i].tmds_type = TMDS_EXT; ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_LVTM1: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: ++ mode_info->bios_connector[i].devices |= (1 << ATOM_DEVICE_DFP3_INDEX); ++ mode_info->bios_connector[i].tmds_type = TMDS_LVTMA; ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_DAC1: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: ++ if (mode_info->bios_connector[i].connector_type == CONNECTOR_DIN || ++ mode_info->bios_connector[i].connector_type == CONNECTOR_STV || ++ mode_info->bios_connector[i].connector_type == CONNECTOR_CTV) ++ mode_info->bios_connector[i].valid = false; ++ else ++ mode_info->bios_connector[i].devices |= (1 << ATOM_DEVICE_CRT1_INDEX); ++ mode_info->bios_connector[i].dac_type = DAC_PRIMARY; ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_DAC2: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: ++ if (mode_info->bios_connector[i].connector_type == CONNECTOR_DIN || ++ mode_info->bios_connector[i].connector_type == CONNECTOR_STV || ++ mode_info->bios_connector[i].connector_type == CONNECTOR_CTV) ++ mode_info->bios_connector[i].valid = false; ++ else ++ mode_info->bios_connector[i].devices |= (1 << ATOM_DEVICE_CRT2_INDEX); ++ 
mode_info->bios_connector[i].dac_type = DAC_TVDAC; ++ break; ++ } ++ } ++ ++ record = (ATOM_COMMON_RECORD_HEADER *) ++ (ctx->bios + data_offset + le16_to_cpu(con_obj->asObjects[i].usRecordOffset)); ++ record_base = le16_to_cpu(con_obj->asObjects[i].usRecordOffset); ++ ++ while (record->ucRecordType > 0 && ++ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { ++ DRM_ERROR("record type %d\n", record->ucRecordType); ++ ++ switch(record->ucRecordType) { ++ case ATOM_I2C_RECORD_TYPE: ++ mode_info->bios_connector[i].ddc_i2c = radeon_parse_i2c_record(dev, (ATOM_I2C_RECORD *)record); ++ break; ++ case ATOM_HPD_INT_RECORD_TYPE: ++ break; ++ case ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE: ++ break; ++ } ++ record = (ATOM_COMMON_RECORD_HEADER *)((char *)record + record->ucRecordSize); ++ } ++ ++ } ++ return true; ++} ++ ++ ++bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct atom_context *ctx = mode_info->atom_context; ++ int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo); ++ uint16_t size, data_offset; ++ uint8_t frev, crev; ++ uint16_t device_support; ++ ++ union atom_supported_devices *supported_devices; ++ int i,j; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) ++ // FIXME this should return false for pre-r6xx chips ++ if (radeon_get_atom_connector_info_from_bios_object_table(dev)) ++ return true; ++ ++ atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); ++ ++ supported_devices = (union atom_supported_devices *)(ctx->bios + data_offset); ++ ++ device_support = le16_to_cpu(supported_devices->info.usDeviceSupport); ++ ++ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { ++ ++ ATOM_CONNECTOR_INFO_I2C ci = supported_devices->info.asConnInfo[i]; ++ ++ if (!(device_support & (1 << i))) { ++ mode_info->bios_connector[i].valid = false; ++ continue; ++ } ++ ++ if 
(i == ATOM_DEVICE_CV_INDEX) { ++ DRM_DEBUG("Skipping Component Video\n"); ++ mode_info->bios_connector[i].valid = false; ++ continue; ++ } ++ ++ if (i == ATOM_DEVICE_TV1_INDEX) { ++ DRM_DEBUG("Skipping TV Out\n"); ++ mode_info->bios_connector[i].valid = false; ++ continue; ++ } ++ ++ mode_info->bios_connector[i].valid = true; ++ mode_info->bios_connector[i].output_id = ci.sucI2cId.sbfAccess.bfI2C_LineMux; ++ mode_info->bios_connector[i].devices = 1 << i; ++ mode_info->bios_connector[i].connector_type = ci.sucConnectorInfo.sbfAccess.bfConnectorType; ++ ++ if (mode_info->bios_connector[i].connector_type == CONNECTOR_NONE) { ++ mode_info->bios_connector[i].valid = false; ++ continue; ++ } ++ ++ mode_info->bios_connector[i].dac_type = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC; ++ ++ if ((i == ATOM_DEVICE_TV1_INDEX) || ++ (i == ATOM_DEVICE_TV2_INDEX) || ++ (i == ATOM_DEVICE_TV1_INDEX)) ++ mode_info->bios_connector[i].ddc_i2c.valid = false; ++ else if ((dev_priv->chip_family == CHIP_RS600) || ++ (dev_priv->chip_family == CHIP_RS690) || ++ (dev_priv->chip_family == CHIP_RS740)) { ++ if ((i == ATOM_DEVICE_DFP2_INDEX) || (i == ATOM_DEVICE_DFP3_INDEX)) ++ mode_info->bios_connector[i].ddc_i2c = ++ radeon_lookup_gpio(dev, ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1); ++ else ++ mode_info->bios_connector[i].ddc_i2c = ++ radeon_lookup_gpio(dev, ci.sucI2cId.sbfAccess.bfI2C_LineMux); ++ } else ++ mode_info->bios_connector[i].ddc_i2c = ++ radeon_lookup_gpio(dev, ci.sucI2cId.sbfAccess.bfI2C_LineMux); ++ ++ if (i == ATOM_DEVICE_DFP1_INDEX) ++ mode_info->bios_connector[i].tmds_type = TMDS_INT; ++ else if (i == ATOM_DEVICE_DFP2_INDEX) { ++ if ((dev_priv->chip_family == CHIP_RS600) || ++ (dev_priv->chip_family == CHIP_RS690) || ++ (dev_priv->chip_family == CHIP_RS740)) ++ mode_info->bios_connector[i].tmds_type = TMDS_DDIA; ++ else ++ mode_info->bios_connector[i].tmds_type = TMDS_EXT; ++ } else if (i == ATOM_DEVICE_DFP3_INDEX) ++ mode_info->bios_connector[i].tmds_type = TMDS_LVTMA; ++ 
else ++ mode_info->bios_connector[i].tmds_type = TMDS_NONE; ++ ++ /* Always set the connector type to VGA for CRT1/CRT2. if they are ++ * shared with a DVI port, we'll pick up the DVI connector below when we ++ * merge the outputs ++ */ ++ if ((i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX) && ++ (mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_I || ++ mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_D || ++ mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_A)) { ++ mode_info->bios_connector[i].connector_type = CONNECTOR_VGA; ++ } ++ ++ if (crev > 1) { ++ ATOM_CONNECTOR_INC_SRC_BITMAP isb = supported_devices->info_2.asIntSrcInfo[i]; ++ ++ switch(isb.ucIntSrcBitmap) { ++ case 0x4: ++ mode_info->bios_connector[i].hpd_mask = 0x1; ++ break; ++ case 0xa: ++ mode_info->bios_connector[i].hpd_mask = 0x100; ++ break; ++ default: ++ mode_info->bios_connector[i].hpd_mask = 0; ++ break; ++ } ++ } else { ++ mode_info->bios_connector[i].hpd_mask = 0; ++ } ++ ++ radeon_atom_apply_quirks(dev, i); ++ } ++ ++ /* CRTs/DFPs may share a port */ ++ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { ++ if (!mode_info->bios_connector[i].valid) ++ continue; ++ ++ for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) { ++ if (mode_info->bios_connector[j].valid && (i != j)) { ++ if (mode_info->bios_connector[i].output_id == ++ mode_info->bios_connector[j].output_id) { ++ if (((i == ATOM_DEVICE_DFP1_INDEX) || ++ (i == ATOM_DEVICE_DFP2_INDEX) || ++ (i == ATOM_DEVICE_DFP3_INDEX)) && ++ ((j == ATOM_DEVICE_CRT1_INDEX) || ++ (j == ATOM_DEVICE_CRT2_INDEX))) { ++ mode_info->bios_connector[i].dac_type = mode_info->bios_connector[j].dac_type; ++ mode_info->bios_connector[i].devices |= mode_info->bios_connector[j].devices; ++ mode_info->bios_connector[i].hpd_mask = mode_info->bios_connector[j].hpd_mask; ++ mode_info->bios_connector[j].valid = false; ++ } else if (((j == ATOM_DEVICE_DFP1_INDEX) || ++ (j == ATOM_DEVICE_DFP2_INDEX) || ++ (j == 
ATOM_DEVICE_DFP3_INDEX)) && ++ ((i == ATOM_DEVICE_CRT1_INDEX) || ++ (i == ATOM_DEVICE_CRT2_INDEX))) { ++ mode_info->bios_connector[j].dac_type = mode_info->bios_connector[i].dac_type; ++ mode_info->bios_connector[j].devices |= mode_info->bios_connector[i].devices; ++ mode_info->bios_connector[j].hpd_mask = mode_info->bios_connector[i].hpd_mask; ++ mode_info->bios_connector[i].valid = false; ++ } ++ } ++ } ++ } ++ } ++ ++ ++ DRM_DEBUG("BIOS Connector table\n"); ++ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { ++ if (!mode_info->bios_connector[i].valid) ++ continue; ++ ++ DRM_DEBUG("Port %d: ddc_type 0x%x, dac_type %d, tmds_type %d, connector type %d, hpd_mask %d\n", ++ i, mode_info->bios_connector[i].ddc_i2c.mask_clk_reg, ++ mode_info->bios_connector[i].dac_type, ++ mode_info->bios_connector[i].tmds_type, ++ mode_info->bios_connector[i].connector_type, ++ mode_info->bios_connector[i].hpd_mask); ++ } ++ return true; ++} ++ ++union firmware_info { ++ ATOM_FIRMWARE_INFO info; ++ ATOM_FIRMWARE_INFO_V1_2 info_12; ++ ATOM_FIRMWARE_INFO_V1_3 info_13; ++ ATOM_FIRMWARE_INFO_V1_4 info_14; ++}; ++ ++bool radeon_atom_get_clock_info(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); ++ union firmware_info *firmware_info; ++ uint8_t frev, crev; ++ struct radeon_pll *p1pll = &mode_info->p1pll; ++ struct radeon_pll *p2pll = &mode_info->p2pll; ++ struct radeon_pll *spll = &mode_info->spll; ++ struct radeon_pll *mpll = &mode_info->mpll; ++ uint16_t data_offset; ++ ++ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); ++ ++ firmware_info = (union firmware_info *)(mode_info->atom_context->bios + data_offset); ++ ++ if (firmware_info) { ++ /* pixel clocks */ ++ p1pll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); ++ p1pll->reference_div = 0; ++ ++ p1pll->pll_out_min = 
le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); ++ p1pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); ++ ++ if (p1pll->pll_out_min == 0) { ++ if (radeon_is_avivo(dev_priv)) ++ p1pll->pll_out_min = 64800; ++ else ++ p1pll->pll_out_min = 20000; ++ } ++ ++ p1pll->pll_in_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input); ++ p1pll->pll_in_max = le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input); ++ ++ *p2pll = *p1pll; ++ ++ /* system clock */ ++ spll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); ++ spll->reference_div = 0; ++ ++ spll->pll_out_min = le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output); ++ spll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output); ++ ++ /* ??? */ ++ if (spll->pll_out_min == 0) { ++ if (radeon_is_avivo(dev_priv)) ++ spll->pll_out_min = 64800; ++ else ++ spll->pll_out_min = 20000; ++ } ++ ++ spll->pll_in_min = le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input); ++ spll->pll_in_max = le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); ++ ++ ++ /* memory clock */ ++ mpll->reference_freq = le16_to_cpu(firmware_info->info.usReferenceClock); ++ mpll->reference_div = 0; ++ ++ mpll->pll_out_min = le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output); ++ mpll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output); ++ ++ /* ??? 
*/ ++ if (mpll->pll_out_min == 0) { ++ if (radeon_is_avivo(dev_priv)) ++ mpll->pll_out_min = 64800; ++ else ++ mpll->pll_out_min = 20000; ++ } ++ ++ mpll->pll_in_min = le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input); ++ mpll->pll_in_max = le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input); ++ ++ mode_info->sclk = le32_to_cpu(firmware_info->info.ulDefaultEngineClock); ++ mode_info->mclk = le32_to_cpu(firmware_info->info.ulDefaultMemoryClock); ++ ++ return true; ++ } ++ return false; ++} ++ ++ ++void radeon_atombios_get_tmds_info(struct radeon_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->base.dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ int index = GetIndexIntoMasterTable(DATA, TMDS_Info); ++ uint16_t data_offset; ++ struct _ATOM_TMDS_INFO *tmds_info; ++ uint8_t frev, crev; ++ uint16_t maxfreq; ++ int i; ++ ++ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); ++ ++ tmds_info = (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios + data_offset); ++ ++ maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); ++ for (i = 0; i < 4; i++) { ++ encoder->tmds_pll[i].freq = le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency); ++ encoder->tmds_pll[i].value = tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f; ++ encoder->tmds_pll[i].value |= (tmds_info->asMiscInfo[i].ucPLL_VCO_Gain & 0x3f) << 6; ++ encoder->tmds_pll[i].value |= (tmds_info->asMiscInfo[i].ucPLL_DutyCycle & 0xf) << 12; ++ encoder->tmds_pll[i].value |= (tmds_info->asMiscInfo[i].ucPLL_VoltageSwing & 0xf) << 16; ++ ++ DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n", ++ encoder->tmds_pll[i].freq, ++ encoder->tmds_pll[i].value); ++ ++ if (maxfreq == encoder->tmds_pll[i].freq) { ++ encoder->tmds_pll[i].freq = 0xffffffff; ++ break; ++ } ++ } ++} ++ ++union lvds_info { ++ struct _ATOM_LVDS_INFO info; ++ struct _ATOM_LVDS_INFO_V12 info_12; ++}; ++ ++void 
radeon_atombios_get_lvds_info(struct radeon_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->base.dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ int index = GetIndexIntoMasterTable(DATA, LVDS_Info); ++ uint16_t data_offset; ++ union lvds_info *lvds_info; ++ uint8_t frev, crev; ++ ++ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); ++ ++ lvds_info = (union lvds_info *)(mode_info->atom_context->bios + data_offset); ++ ++ if (lvds_info) { ++ encoder->dotclock = le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10; ++ encoder->panel_xres = le16_to_cpu(lvds_info->info.sLCDTiming.usHActive); ++ encoder->panel_yres = le16_to_cpu(lvds_info->info.sLCDTiming.usVActive); ++ encoder->hblank = le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time); ++ encoder->hoverplus = le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset); ++ encoder->hsync_width = le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth); ++ ++ encoder->vblank = le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); ++ encoder->voverplus = le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); ++ encoder->vsync_width = le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); ++ encoder->panel_pwr_delay = le16_to_cpu(lvds_info->info.usOffDelayInMs); ++ } ++} ++ ++void radeon_atom_dyn_clk_setup(struct drm_device *dev, int enable) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct atom_context *ctx = mode_info->atom_context; ++ DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating); ++ ++ args.ucEnable = enable; ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++void radeon_atom_static_pwrmgt_setup(struct drm_device *dev, int enable) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ 
struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct atom_context *ctx = mode_info->atom_context; ++ ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt); ++ ++ args.ucEnable = enable; ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++void radeon_atom_set_engine_clock(struct drm_device *dev, int eng_clock) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct atom_context *ctx = mode_info->atom_context; ++ SET_ENGINE_CLOCK_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); ++ ++ args.ulTargetEngineClock = eng_clock; /* 10 khz */ ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++void radeon_atom_set_memory_clock(struct drm_device *dev, int mem_clock) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct atom_context *ctx = mode_info->atom_context; ++ SET_MEMORY_CLOCK_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock); ++ ++ args.ulTargetMemoryClock = mem_clock; /* 10 khz */ ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t bios_2_scratch, bios_6_scratch; ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ bios_2_scratch = RADEON_READ(RADEON_BIOS_0_SCRATCH); ++ bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ } else { ++ bios_2_scratch = RADEON_READ(RADEON_BIOS_0_SCRATCH); ++ bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ } ++ ++ /* let the bios control the backlight */ ++ bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; ++ ++ /* tell the bios not to handle mode switching */ 
++ bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ++ ATOM_S6_ACC_MODE); ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ RADEON_WRITE(R600_BIOS_2_SCRATCH, bios_2_scratch); ++ RADEON_WRITE(R600_BIOS_6_SCRATCH, bios_6_scratch); ++ } else { ++ RADEON_WRITE(RADEON_BIOS_2_SCRATCH, bios_2_scratch); ++ RADEON_WRITE(RADEON_BIOS_6_SCRATCH, bios_6_scratch); ++ } ++ ++} ++ ++void ++radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t bios_6_scratch; ++ ++ if (dev_priv->chip_family >= CHIP_R600) ++ bios_6_scratch = RADEON_READ(R600_BIOS_6_SCRATCH); ++ else ++ bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ ++ if (lock) ++ bios_6_scratch |= ATOM_S6_CRITICAL_STATE; ++ else ++ bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; ++ ++ if (dev_priv->chip_family >= CHIP_R600) ++ RADEON_WRITE(R600_BIOS_6_SCRATCH, bios_6_scratch); ++ else ++ RADEON_WRITE(RADEON_BIOS_6_SCRATCH, bios_6_scratch); ++} +diff --git a/drivers/gpu/drm/radeon/radeon_buffer.c b/drivers/gpu/drm/radeon/radeon_buffer.c +new file mode 100644 +index 0000000..bd5761a +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_buffer.c +@@ -0,0 +1,456 @@ ++/************************************************************************** ++ * ++ * Copyright 2007 Dave Airlie ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * ++ **************************************************************************/ ++/* ++ * Authors: Dave Airlie ++ */ ++ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (dev_priv->flags & RADEON_IS_AGP) ++ return drm_agp_init_ttm(dev); ++ else ++ return ati_pcigart_init_ttm(dev, &dev_priv->gart_info, radeon_gart_flush); ++} ++ ++int radeon_fence_types(struct drm_buffer_object *bo, uint32_t * class, uint32_t * type) ++{ ++ *class = 0; ++ *type = 1; ++ return 0; ++} ++ ++int radeon_invalidate_caches(struct drm_device * dev, uint64_t flags) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ if (!dev_priv->cp_running) ++ return 0; ++ ++ BEGIN_RING(6); ++ RADEON_PURGE_CACHE(); ++ RADEON_PURGE_ZCACHE(); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ return 0; ++} ++ ++int radeon_init_mem_type(struct drm_device * dev, uint32_t type, ++ struct drm_mem_type_manager * man) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ switch (type) { ++ case DRM_BO_MEM_LOCAL: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CACHED; ++ man->drm_bus_maptype = 0; ++ break; ++ case DRM_BO_MEM_VRAM: ++ man->flags = _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP; ++ man->io_addr = NULL; ++ man->drm_bus_maptype = _DRM_FRAME_BUFFER; ++ man->io_offset = drm_get_resource_start(dev, 0); ++ man->io_size = drm_get_resource_len(dev, 0); ++ break; ++ case DRM_BO_MEM_TT: ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ if (!(drm_core_has_AGP(dev) && dev->agp)) { ++ DRM_ERROR("AGP is not enabled for memory type %u\n", ++ (unsigned)type); ++ return -EINVAL; ++ } ++ man->io_offset = dev->agp->agp_info.aper_base; ++ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; ++ man->io_addr = NULL; ++ man->flags = 
_DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; ++ man->drm_bus_maptype = _DRM_AGP; ++ } else { ++ man->io_offset = dev_priv->gart_vm_start; ++ man->io_size = dev_priv->gart_size; ++ man->io_addr = NULL; ++ man->flags = _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CMA; ++ man->drm_bus_maptype = _DRM_SCATTER_GATHER; ++ } ++ break; ++ default: ++ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++void radeon_emit_copy_blit(struct drm_device * dev, ++ uint32_t src_offset, ++ uint32_t dst_offset, ++ uint32_t pages) ++{ ++ uint32_t cur_pages; ++ uint32_t stride_bytes = PAGE_SIZE; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t format, pitch; ++ const uint32_t clip = (0x1fff) | (0x1fff << 16); ++ uint32_t stride_pixels; ++ int num_loops; ++ RING_LOCALS; ++ ++ if (!dev_priv) ++ return; ++ ++ /* 32-bit copy format */ ++ format = RADEON_COLOR_FORMAT_ARGB8888; ++ ++ /* radeon limited to 16k stride */ ++ stride_bytes &= 0x3fff; ++ /* radeon pitch is /64 */ ++ pitch = stride_bytes / 64; ++ ++ stride_pixels = stride_bytes / 4; ++ ++ num_loops = DIV_ROUND_UP(pages, 8191); ++ ++ BEGIN_RING(4 + (10 * num_loops)); ++ ++ while(pages > 0) { ++ cur_pages = pages; ++ if (cur_pages > 8191) ++ cur_pages = 8191; ++ pages -= cur_pages; ++ ++ /* pages are in Y direction - height ++ page width in X direction - width */ ++ OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 8)); ++ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | ++ RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_SRC_CLIPPING | RADEON_GMC_DST_CLIPPING | ++ RADEON_GMC_BRUSH_NONE | ++ (format << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_S | ++ RADEON_DP_SRC_SOURCE_MEMORY | ++ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS); ++ OUT_RING((pitch << 22) | (src_offset >> 10)); ++ OUT_RING((pitch << 22) | (dst_offset >> 10)); ++ OUT_RING(clip); // SRC _SC BOT_RITE ++ OUT_RING(0); // 
SC_TOP_LEFT ++ OUT_RING(clip); // SC_BOT_RITE ++ ++ OUT_RING(pages); ++ OUT_RING(pages); /* x - y */ ++ OUT_RING(cur_pages | (stride_pixels << 16)); ++ } ++ ++ OUT_RING(CP_PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(RADEON_RB2D_DC_FLUSH_ALL); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ADVANCE_RING(); ++ ++ COMMIT_RING(); ++ return; ++} ++ ++int radeon_move_blit(struct drm_buffer_object * bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem, ++ struct drm_bo_mem_reg *old_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t old_start, new_start; ++ ++ old_start = old_mem->mm_node->start << PAGE_SHIFT; ++ new_start = new_mem->mm_node->start << PAGE_SHIFT; ++ ++ if (old_mem->mem_type == DRM_BO_MEM_VRAM) ++ old_start += dev_priv->fb_location; ++ if (old_mem->mem_type == DRM_BO_MEM_TT) ++ old_start += dev_priv->gart_vm_start; ++ ++ if (new_mem->mem_type == DRM_BO_MEM_VRAM) ++ new_start += dev_priv->fb_location; ++ if (new_mem->mem_type == DRM_BO_MEM_TT) ++ new_start += dev_priv->gart_vm_start; ++ ++ radeon_emit_copy_blit(bo->dev, ++ old_start, ++ new_start, ++ new_mem->num_pages); ++ ++ /* invalidate the chip caches */ ++ ++ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0, ++ DRM_FENCE_TYPE_EXE, 0, ++ new_mem); ++} ++ ++void radeon_emit_solid_fill(struct drm_device * dev, ++ uint32_t dst_offset, ++ uint32_t pages, uint8_t value) ++{ ++ uint32_t cur_pages; ++ uint32_t stride_bytes = PAGE_SIZE; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t format, pitch; ++ const uint32_t clip = (0x1fff) | (0x1fff << 16); ++ uint32_t stride_pixels; ++ int num_loops; ++ RING_LOCALS; ++ ++ if (!dev_priv) ++ return; ++ ++ if (!radeon_vram_zero) ++ return; ++ ++ /* 32-bit copy format */ ++ format = RADEON_COLOR_FORMAT_ARGB8888; ++ ++ /* radeon limited to 16k stride */ ++ stride_bytes &= 0x3fff; ++ /* radeon pitch is /64 */ ++ pitch = stride_bytes / 64; ++ ++ stride_pixels = stride_bytes / 4; ++ ++ 
num_loops = DIV_ROUND_UP(pages, 8191); ++ ++ BEGIN_RING(4 + (8*num_loops)); ++ ++ while(pages > 0) { ++ cur_pages = pages; ++ if (cur_pages > 8191) ++ cur_pages = 8191; ++ pages -= cur_pages; ++ ++ /* pages are in Y direction - height ++ page width in X direction - width */ ++ OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 6)); ++ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_DST_CLIPPING | ++ RADEON_GMC_BRUSH_SOLID_COLOR | ++ (format << 8) | ++ RADEON_ROP3_P | ++ RADEON_CLR_CMP_SRC_SOURCE | ++ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS); ++ OUT_RING((pitch << 22) | (dst_offset >> 10)); // PITCH ++ OUT_RING(0); // SC_TOP_LEFT // DST CLIPPING ++ OUT_RING(clip); // SC_BOT_RITE ++ ++ OUT_RING(0); // COLOR ++ ++ OUT_RING(pages); /* x - y */ ++ OUT_RING(cur_pages | (stride_pixels << 16)); ++ } ++ ++ OUT_RING(CP_PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(RADEON_RB2D_DC_FLUSH_ALL); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ADVANCE_RING(); ++ ++ COMMIT_RING(); ++ return; ++} ++ ++int radeon_move_zero_fill(struct drm_buffer_object * bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t new_start; ++ ++ if (new_mem->mem_type != DRM_BO_MEM_VRAM) ++ return -1; ++ ++ ++ new_start = new_mem->mm_node->start << PAGE_SHIFT; ++ new_start += dev_priv->fb_location; ++ ++ radeon_emit_solid_fill(bo->dev, ++ new_start, ++ new_mem->num_pages, 0); ++ ++ /* invalidate the chip caches */ ++ ++ return drm_bo_move_accel_cleanup(bo, 1, no_wait, 0, ++ DRM_FENCE_TYPE_EXE, 0, ++ new_mem); ++} ++ ++static int radeon_move_flip(struct drm_buffer_object * bo, ++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg tmp_mem; ++ int ret; ++ ++ tmp_mem = *new_mem; ++ ++ /* if we are flipping into LOCAL memory we have no TTM so create one */ ++ if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ 
tmp_mem.mm_node = NULL; ++ tmp_mem.proposed_flags = DRM_BO_FLAG_MEM_TT; ++ ++ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); ++ if (ret) ++ return ret; ++ ++ ret = drm_ttm_bind(bo->ttm, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ } ++ ++ ret = radeon_move_blit(bo, 1, no_wait, &tmp_mem, &bo->mem); ++ if (ret) ++ goto out_cleanup; ++ ++ if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); ++ } else { ++ tmp_mem.mm_node = NULL; ++ new_mem->mm_node = NULL; ++ } ++ ++out_cleanup: ++ if (tmp_mem.mm_node) { ++ mutex_lock(&dev->struct_mutex); ++ if (tmp_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(tmp_mem.mm_node); ++ tmp_mem.mm_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ } ++ return ret; ++} ++ ++static int radeon_move_vram(struct drm_buffer_object * bo, ++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg tmp_mem; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ int ret; ++ ++ /* old - LOCAL memory node bo->mem ++ tmp - TT type memory node ++ new - VRAM memory node */ ++ ++ tmp_mem = *old_mem; ++ tmp_mem.mm_node = NULL; ++ ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ tmp_mem.proposed_flags = DRM_BO_FLAG_MEM_TT; ++ ++ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); ++ if (ret) ++ return ret; ++ } ++ ++ if (!bo->ttm) { ++ ret = drm_bo_add_ttm(bo); ++ if (ret) ++ goto out_cleanup; ++ } ++ ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ ret = drm_bo_move_ttm(bo, evict, no_wait, &tmp_mem); ++ if (ret) ++ return ret; ++ } ++ ++ ret = radeon_move_blit(bo, 1, no_wait, new_mem, &bo->mem); ++ if (ret) ++ goto out_cleanup; ++ ++out_cleanup: ++ if (tmp_mem.mm_node) { ++ mutex_lock(&dev->struct_mutex); ++ if (tmp_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(tmp_mem.mm_node); ++ tmp_mem.mm_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ } ++ return ret; ++} ++ ++int radeon_move(struct drm_buffer_object * bo, ++ int evict, int 
no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (!dev_priv->cp_running) ++ goto fallback; ++ ++ if (bo->mem.flags & DRM_BO_FLAG_CLEAN) /* need to implement solid fill */ ++ { ++ if (radeon_move_zero_fill(bo, evict, no_wait, new_mem)) ++ return drm_bo_move_zero(bo, evict, no_wait, new_mem); ++ return 0; ++ } ++ ++ if (new_mem->mem_type == DRM_BO_MEM_VRAM) { ++ if (radeon_move_vram(bo, evict, no_wait, new_mem)) ++ goto fallback; ++ } else { ++ if (radeon_move_flip(bo, evict, no_wait, new_mem)) ++ goto fallback; ++ } ++ return 0; ++fallback: ++ if (bo->mem.flags & DRM_BO_FLAG_CLEAN) ++ return drm_bo_move_zero(bo, evict, no_wait, new_mem); ++ else ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++} ++ ++ ++/* ++ * i915_evict_flags: ++ * ++ * @bo: the buffer object to be evicted ++ * ++ * Return the bo flags for a buffer which is not mapped to the hardware. ++ * These will be placed in proposed_flags so that when the move is ++ * finished, they'll end up in bo->mem.flags ++ */ ++uint64_t radeon_evict_flags(struct drm_buffer_object *bo) ++{ ++ switch (bo->mem.mem_type) { ++ case DRM_BO_MEM_LOCAL: ++ case DRM_BO_MEM_TT: ++ return DRM_BO_FLAG_MEM_LOCAL; ++ default: ++ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_LOCAL; ++ } ++} +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c +new file mode 100644 +index 0000000..107a081 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_combios.c +@@ -0,0 +1,1404 @@ ++/* ++ * Copyright 2004 ATI Technologies Inc., Markham, Ontario ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++/* old legacy ATI BIOS routines */ ++ ++/* COMBIOS table offsets */ ++enum radeon_combios_table_offset ++{ ++ /* absolute offset tables */ ++ COMBIOS_ASIC_INIT_1_TABLE, ++ COMBIOS_BIOS_SUPPORT_TABLE, ++ COMBIOS_DAC_PROGRAMMING_TABLE, ++ COMBIOS_MAX_COLOR_DEPTH_TABLE, ++ COMBIOS_CRTC_INFO_TABLE, ++ COMBIOS_PLL_INFO_TABLE, ++ COMBIOS_TV_INFO_TABLE, ++ COMBIOS_DFP_INFO_TABLE, ++ COMBIOS_HW_CONFIG_INFO_TABLE, ++ COMBIOS_MULTIMEDIA_INFO_TABLE, ++ COMBIOS_TV_STD_PATCH_TABLE, ++ COMBIOS_LCD_INFO_TABLE, ++ COMBIOS_MOBILE_INFO_TABLE, ++ COMBIOS_PLL_INIT_TABLE, ++ COMBIOS_MEM_CONFIG_TABLE, ++ COMBIOS_SAVE_MASK_TABLE, ++ COMBIOS_HARDCODED_EDID_TABLE, ++ COMBIOS_ASIC_INIT_2_TABLE, ++ COMBIOS_CONNECTOR_INFO_TABLE, ++ COMBIOS_DYN_CLK_1_TABLE, ++ COMBIOS_RESERVED_MEM_TABLE, ++ COMBIOS_EXT_TMDS_INFO_TABLE, ++ COMBIOS_MEM_CLK_INFO_TABLE, ++ COMBIOS_EXT_DAC_INFO_TABLE, ++ COMBIOS_MISC_INFO_TABLE, ++ COMBIOS_CRT_INFO_TABLE, ++ COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE, ++ COMBIOS_COMPONENT_VIDEO_INFO_TABLE, ++ COMBIOS_FAN_SPEED_INFO_TABLE, ++ COMBIOS_OVERDRIVE_INFO_TABLE, ++ COMBIOS_OEM_INFO_TABLE, ++ COMBIOS_DYN_CLK_2_TABLE, ++ COMBIOS_POWER_CONNECTOR_INFO_TABLE, ++ COMBIOS_I2C_INFO_TABLE, ++ /* relative offset tables */ ++ COMBIOS_ASIC_INIT_3_TABLE, /* offset from misc info */ ++ COMBIOS_ASIC_INIT_4_TABLE, /* offset from misc info */ ++ COMBIOS_ASIC_INIT_5_TABLE, /* offset from misc info */ ++ COMBIOS_RAM_RESET_TABLE, /* offset from mem config */ ++ COMBIOS_POWERPLAY_INFO_TABLE, /* offset from mobile info */ ++ COMBIOS_GPIO_INFO_TABLE, /* offset from mobile info */ ++ COMBIOS_LCD_DDC_INFO_TABLE, /* offset from mobile info */ ++ COMBIOS_TMDS_POWER_TABLE, /* offset from mobile info */ ++ COMBIOS_TMDS_POWER_ON_TABLE, /* offset from tmds power */ ++ COMBIOS_TMDS_POWER_OFF_TABLE, /* offset from tmds power */ ++}; ++ ++enum radeon_combios_ddc ++{ ++ 
DDC_NONE_DETECTED, ++ DDC_MONID, ++ DDC_DVI, ++ DDC_VGA, ++ DDC_CRT2, ++ DDC_LCD, ++ DDC_GPIO, ++}; ++ ++enum radeon_combios_connector ++{ ++ CONNECTOR_NONE_LEGACY, ++ CONNECTOR_PROPRIETARY_LEGACY, ++ CONNECTOR_CRT_LEGACY, ++ CONNECTOR_DVI_I_LEGACY, ++ CONNECTOR_DVI_D_LEGACY, ++ CONNECTOR_CTV_LEGACY, ++ CONNECTOR_STV_LEGACY, ++ CONNECTOR_UNSUPPORTED_LEGACY ++}; ++ ++static uint16_t combios_get_table_offset(struct drm_device *dev, enum radeon_combios_table_offset table) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int rev; ++ uint16_t offset = 0, check_offset; ++ ++ switch (table) { ++ /* absolute offset tables */ ++ case COMBIOS_ASIC_INIT_1_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0xc); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_BIOS_SUPPORT_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x14); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_DAC_PROGRAMMING_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x2a); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_MAX_COLOR_DEPTH_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x2c); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_CRTC_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x2e); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_PLL_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x30); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_TV_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x32); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_DFP_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x34); ++ if (check_offset) ++ offset = check_offset; ++ 
break; ++ case COMBIOS_HW_CONFIG_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x36); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_MULTIMEDIA_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x38); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_TV_STD_PATCH_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x3e); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_LCD_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x40); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_MOBILE_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x42); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_PLL_INIT_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x46); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_MEM_CONFIG_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x48); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_SAVE_MASK_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x4a); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_HARDCODED_EDID_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x4c); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_ASIC_INIT_2_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x4e); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_CONNECTOR_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x50); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_DYN_CLK_1_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 
0x52); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_RESERVED_MEM_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x54); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_EXT_TMDS_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x58); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_MEM_CLK_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x5a); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_EXT_DAC_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x5c); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_MISC_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x5e); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_CRT_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x60); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x62); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x64); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_FAN_SPEED_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x66); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_OVERDRIVE_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x68); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_OEM_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x6a); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case 
COMBIOS_DYN_CLK_2_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x6c); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_POWER_CONNECTOR_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x6e); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ case COMBIOS_I2C_INFO_TABLE: ++ check_offset = radeon_bios16(dev_priv, dev_priv->bios_header_start + 0x70); ++ if (check_offset) ++ offset = check_offset; ++ break; ++ /* relative offset tables */ ++ case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ ++ check_offset = combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE); ++ if (check_offset) { ++ rev = radeon_bios8(dev_priv, check_offset); ++ if (rev > 0) { ++ check_offset = radeon_bios16(dev_priv, check_offset + 0x3); ++ if (check_offset) ++ offset = check_offset; ++ } ++ } ++ break; ++ case COMBIOS_ASIC_INIT_4_TABLE: /* offset from misc info */ ++ check_offset = combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE); ++ if (check_offset) { ++ rev = radeon_bios8(dev_priv, check_offset); ++ if (rev > 0) { ++ check_offset = radeon_bios16(dev_priv, check_offset + 0x5); ++ if (check_offset) ++ offset = check_offset; ++ } ++ } ++ break; ++ case COMBIOS_ASIC_INIT_5_TABLE: /* offset from misc info */ ++ check_offset = combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE); ++ if (check_offset) { ++ rev = radeon_bios8(dev_priv, check_offset); ++ if (rev == 2) { ++ check_offset = radeon_bios16(dev_priv, check_offset + 0x9); ++ if (check_offset) ++ offset = check_offset; ++ } ++ } ++ break; ++ case COMBIOS_RAM_RESET_TABLE: /* offset from mem config */ ++ check_offset = combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE); ++ if (check_offset) { ++ while (radeon_bios8(dev_priv, check_offset++)); ++ check_offset += 2; ++ if (check_offset) ++ offset = check_offset; ++ } ++ break; ++ case COMBIOS_POWERPLAY_INFO_TABLE: /* offset from mobile info */ ++ check_offset = 
combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); ++ if (check_offset) { ++ check_offset = radeon_bios16(dev_priv, check_offset + 0x11); ++ if (check_offset) ++ offset = check_offset; ++ } ++ break; ++ case COMBIOS_GPIO_INFO_TABLE: /* offset from mobile info */ ++ check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); ++ if (check_offset) { ++ check_offset = radeon_bios16(dev_priv, check_offset + 0x13); ++ if (check_offset) ++ offset = check_offset; ++ } ++ break; ++ case COMBIOS_LCD_DDC_INFO_TABLE: /* offset from mobile info */ ++ check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); ++ if (check_offset) { ++ check_offset = radeon_bios16(dev_priv, check_offset + 0x15); ++ if (check_offset) ++ offset = check_offset; ++ } ++ break; ++ case COMBIOS_TMDS_POWER_TABLE: /* offset from mobile info */ ++ check_offset = combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE); ++ if (check_offset) { ++ check_offset = radeon_bios16(dev_priv, check_offset + 0x17); ++ if (check_offset) ++ offset = check_offset; ++ } ++ break; ++ case COMBIOS_TMDS_POWER_ON_TABLE: /* offset from tmds power */ ++ check_offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE); ++ if (check_offset) { ++ check_offset = radeon_bios16(dev_priv, check_offset + 0x2); ++ if (check_offset) ++ offset = check_offset; ++ } ++ break; ++ case COMBIOS_TMDS_POWER_OFF_TABLE: /* offset from tmds power */ ++ check_offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE); ++ if (check_offset) { ++ check_offset = radeon_bios16(dev_priv, check_offset + 0x4); ++ if (check_offset) ++ offset = check_offset; ++ } ++ break; ++ default: ++ break; ++ } ++ ++ return offset; ++ ++} ++ ++struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line) ++{ ++ struct radeon_i2c_bus_rec i2c; ++ ++ i2c.mask_clk_mask = RADEON_GPIO_EN_1; ++ i2c.mask_data_mask = RADEON_GPIO_EN_0; ++ i2c.a_clk_mask = RADEON_GPIO_A_1; ++ i2c.a_data_mask = RADEON_GPIO_A_0; ++ i2c.put_clk_mask = 
RADEON_GPIO_EN_1; ++ i2c.put_data_mask = RADEON_GPIO_EN_0; ++ i2c.get_clk_mask = RADEON_GPIO_Y_1; ++ i2c.get_data_mask = RADEON_GPIO_Y_0; ++ if ((ddc_line == RADEON_LCD_GPIO_MASK) || ++ (ddc_line == RADEON_MDGPIO_EN_REG)) { ++ i2c.mask_clk_reg = ddc_line; ++ i2c.mask_data_reg = ddc_line; ++ i2c.a_clk_reg = ddc_line; ++ i2c.a_data_reg = ddc_line; ++ i2c.put_clk_reg = ddc_line; ++ i2c.put_data_reg = ddc_line; ++ i2c.get_clk_reg = ddc_line + 4; ++ i2c.get_data_reg = ddc_line + 4; ++ } else { ++ i2c.mask_clk_reg = ddc_line; ++ i2c.mask_data_reg = ddc_line; ++ i2c.a_clk_reg = ddc_line; ++ i2c.a_data_reg = ddc_line; ++ i2c.put_clk_reg = ddc_line; ++ i2c.put_data_reg = ddc_line; ++ i2c.get_clk_reg = ddc_line; ++ i2c.get_data_reg = ddc_line; ++ } ++ ++ if (ddc_line) ++ i2c.valid = true; ++ else ++ i2c.valid = false; ++ ++ return i2c; ++} ++ ++bool radeon_combios_get_clock_info(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ uint16_t pll_info; ++ struct radeon_pll *p1pll = &mode_info->p1pll; ++ struct radeon_pll *p2pll = &mode_info->p2pll; ++ struct radeon_pll *spll = &mode_info->spll; ++ struct radeon_pll *mpll = &mode_info->mpll; ++ int8_t rev; ++ uint16_t sclk, mclk; ++ ++ pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); ++ if (pll_info) { ++ rev = radeon_bios8(dev_priv, pll_info); ++ ++ /* pixel clocks */ ++ p1pll->reference_freq = radeon_bios16(dev_priv, pll_info + 0xe); ++ p1pll->reference_div = radeon_bios16(dev_priv, pll_info + 0x10); ++ p1pll->pll_out_min = radeon_bios32(dev_priv, pll_info + 0x12); ++ p1pll->pll_out_max = radeon_bios32(dev_priv, pll_info + 0x16); ++ ++ if (rev > 9) { ++ p1pll->pll_in_min = radeon_bios32(dev_priv, pll_info + 0x36); ++ p1pll->pll_in_max = radeon_bios32(dev_priv, pll_info + 0x3a); ++ } else { ++ p1pll->pll_in_min = 40; ++ p1pll->pll_in_max = 500; ++ } ++ *p2pll = *p1pll; ++ ++ /* system clock */ ++ 
spll->reference_freq = radeon_bios16(dev_priv, pll_info + 0x1a); ++ spll->reference_div = radeon_bios16(dev_priv, pll_info + 0x1c); ++ spll->pll_out_min = radeon_bios32(dev_priv, pll_info + 0x1e); ++ spll->pll_out_max = radeon_bios32(dev_priv, pll_info + 0x22); ++ ++ if (rev > 10) { ++ spll->pll_in_min = radeon_bios32(dev_priv, pll_info + 0x48); ++ spll->pll_in_max = radeon_bios32(dev_priv, pll_info + 0x4c); ++ } else { ++ /* ??? */ ++ spll->pll_in_min = 40; ++ spll->pll_in_max = 500; ++ } ++ ++ /* memory clock */ ++ mpll->reference_freq = radeon_bios16(dev_priv, pll_info + 0x26); ++ mpll->reference_div = radeon_bios16(dev_priv, pll_info + 0x28); ++ mpll->pll_out_min = radeon_bios32(dev_priv, pll_info + 0x2a); ++ mpll->pll_out_max = radeon_bios32(dev_priv, pll_info + 0x2e); ++ ++ if (rev > 10) { ++ mpll->pll_in_min = radeon_bios32(dev_priv, pll_info + 0x5a); ++ mpll->pll_in_max = radeon_bios32(dev_priv, pll_info + 0x5e); ++ } else { ++ /* ??? */ ++ mpll->pll_in_min = 40; ++ mpll->pll_in_max = 500; ++ } ++ ++ /* default sclk/mclk */ ++ sclk = radeon_bios16(dev_priv, pll_info + 0x8); ++ mclk = radeon_bios16(dev_priv, pll_info + 0xa); ++ if (sclk == 0) ++ sclk = 200; ++ if (mclk == 0) ++ mclk = 200; ++ ++ mode_info->sclk = sclk; ++ mode_info->mclk = mclk; ++ ++ return true; ++ } ++ return false; ++} ++ ++bool radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->base.dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint16_t dac_info; ++ uint8_t rev, bg, dac; ++ ++ /* check CRT table */ ++ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); ++ if (dac_info) { ++ rev = radeon_bios8(dev_priv, dac_info) & 0x3; ++ if (rev < 2) { ++ bg = radeon_bios8(dev_priv, dac_info + 0x2) & 0xf; ++ dac = (radeon_bios8(dev_priv, dac_info + 0x2) >> 4) & 0xf; ++ encoder->ps2_pdac_adj = (bg << 8) | (dac); ++ ++ return true; ++ } else { ++ bg = radeon_bios8(dev_priv, dac_info + 0x2) & 0xf; ++ dac = 
radeon_bios8(dev_priv, dac_info + 0x3) & 0xf; ++ encoder->ps2_pdac_adj = (bg << 8) | (dac); ++ ++ return true; ++ } ++ ++ } ++ ++ return false; ++} ++ ++bool radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->base.dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint16_t dac_info; ++ uint8_t rev, bg, dac; ++ ++ /* first check TV table */ ++ dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); ++ if (dac_info) { ++ rev = radeon_bios8(dev_priv, dac_info + 0x3); ++ if (rev > 4) { ++ bg = radeon_bios8(dev_priv, dac_info + 0xc) & 0xf; ++ dac = radeon_bios8(dev_priv, dac_info + 0xd) & 0xf; ++ encoder->ps2_tvdac_adj = (bg << 16) | (dac << 20); ++ ++ bg = radeon_bios8(dev_priv, dac_info + 0xe) & 0xf; ++ dac = radeon_bios8(dev_priv, dac_info + 0xf) & 0xf; ++ encoder->pal_tvdac_adj = (bg << 16) | (dac << 20); ++ ++ bg = radeon_bios8(dev_priv, dac_info + 0x10) & 0xf; ++ dac = radeon_bios8(dev_priv, dac_info + 0x11) & 0xf; ++ encoder->ntsc_tvdac_adj = (bg << 16) | (dac << 20); ++ ++ return true; ++ } else if (rev > 1) { ++ bg = radeon_bios8(dev_priv, dac_info + 0xc) & 0xf; ++ dac = (radeon_bios8(dev_priv, dac_info + 0xc) >> 4) & 0xf; ++ encoder->ps2_tvdac_adj = (bg << 16) | (dac << 20); ++ ++ bg = radeon_bios8(dev_priv, dac_info + 0xd) & 0xf; ++ dac = (radeon_bios8(dev_priv, dac_info + 0xd) >> 4) & 0xf; ++ encoder->pal_tvdac_adj = (bg << 16) | (dac << 20); ++ ++ bg = radeon_bios8(dev_priv, dac_info + 0xe) & 0xf; ++ dac = (radeon_bios8(dev_priv, dac_info + 0xe) >> 4) & 0xf; ++ encoder->ntsc_tvdac_adj = (bg << 16) | (dac << 20); ++ ++ return true; ++ } ++ } ++ ++ /* then check CRT table */ ++ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); ++ if (dac_info) { ++ rev = radeon_bios8(dev_priv, dac_info) & 0x3; ++ if (rev < 2) { ++ bg = radeon_bios8(dev_priv, dac_info + 0x3) & 0xf; ++ dac = (radeon_bios8(dev_priv, dac_info + 0x3) >> 4) & 0xf; ++ encoder->ps2_tvdac_adj = (bg << 16) | 
(dac << 20); ++ encoder->pal_tvdac_adj = encoder->ps2_tvdac_adj; ++ encoder->ntsc_tvdac_adj = encoder->ps2_tvdac_adj; ++ ++ return true; ++ } else { ++ bg = radeon_bios8(dev_priv, dac_info + 0x4) & 0xf; ++ dac = radeon_bios8(dev_priv, dac_info + 0x5) & 0xf; ++ encoder->ps2_tvdac_adj = (bg << 16) | (dac << 20); ++ encoder->pal_tvdac_adj = encoder->ps2_tvdac_adj; ++ encoder->ntsc_tvdac_adj = encoder->ps2_tvdac_adj; ++ ++ return true; ++ } ++ ++ } ++ ++ return false; ++} ++ ++bool radeon_combios_get_tv_info(struct radeon_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->base.dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint16_t tv_info; ++ ++ tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); ++ if (tv_info) { ++ if (radeon_bios8(dev_priv, tv_info + 6) == 'T') { ++ switch (radeon_bios8(dev_priv, tv_info + 7) & 0xf) { ++ case 1: ++ encoder->tv_std = TV_STD_NTSC; ++ DRM_INFO("Default TV standard: NTSC\n"); ++ break; ++ case 2: ++ encoder->tv_std = TV_STD_PAL; ++ DRM_INFO("Default TV standard: PAL\n"); ++ break; ++ case 3: ++ encoder->tv_std = TV_STD_PAL_M; ++ DRM_INFO("Default TV standard: PAL-M\n"); ++ break; ++ case 4: ++ encoder->tv_std = TV_STD_PAL_60; ++ DRM_INFO("Default TV standard: PAL-60\n"); ++ break; ++ case 5: ++ encoder->tv_std = TV_STD_NTSC_J; ++ DRM_INFO("Default TV standard: NTSC-J\n"); ++ break; ++ case 6: ++ encoder->tv_std = TV_STD_SCART_PAL; ++ DRM_INFO("Default TV standard: SCART-PAL\n"); ++ break; ++ default: ++ encoder->tv_std = TV_STD_NTSC; ++ DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); ++ break; ++ } ++ ++ switch ((radeon_bios8(dev_priv, tv_info + 9) >> 2) & 0x3) { ++ case 0: ++ DRM_INFO("29.498928713 MHz TV ref clk\n"); ++ break; ++ case 1: ++ DRM_INFO("28.636360000 MHz TV ref clk\n"); ++ break; ++ case 2: ++ DRM_INFO("14.318180000 MHz TV ref clk\n"); ++ break; ++ case 3: ++ DRM_INFO("27.000000000 MHz TV ref clk\n"); ++ break; ++ default: ++ break; ++ } ++ return true; ++ } ++ } ++ return 
false; ++} ++ ++bool radeon_combios_get_lvds_info(struct radeon_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->base.dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint16_t lcd_info; ++ uint32_t panel_setup; ++ char stmp[30]; ++ int tmp, i; ++ ++ lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); ++ ++ if (lcd_info) { ++ for (i = 0; i < 24; i++) ++ stmp[i] = radeon_bios8(dev_priv, lcd_info + i + 1); ++ stmp[24] = 0; ++ ++ DRM_INFO("Panel ID String: %s\n", stmp); ++ ++ encoder->panel_xres = radeon_bios16(dev_priv, lcd_info + 25); ++ encoder->panel_yres = radeon_bios16(dev_priv, lcd_info + 27); ++ ++ DRM_INFO("Panel Size %dx%d\n", encoder->panel_xres, encoder->panel_yres); ++ ++ encoder->panel_vcc_delay = radeon_bios16(dev_priv, lcd_info + 44); ++ if (encoder->panel_vcc_delay > 2000 || encoder->panel_vcc_delay < 0) ++ encoder->panel_vcc_delay = 2000; ++ ++ encoder->panel_pwr_delay = radeon_bios16(dev_priv, lcd_info + 0x24); ++ encoder->panel_digon_delay = radeon_bios16(dev_priv, lcd_info + 0x38) & 0xf; ++ encoder->panel_blon_delay = (radeon_bios16(dev_priv, lcd_info + 0x38) >> 4) & 0xf; ++ ++ encoder->panel_ref_divider = radeon_bios16(dev_priv, lcd_info + 46); ++ encoder->panel_post_divider = radeon_bios8(dev_priv, lcd_info + 48); ++ encoder->panel_fb_divider = radeon_bios16(dev_priv, lcd_info + 49); ++ if ((encoder->panel_ref_divider != 0) && ++ (encoder->panel_fb_divider > 3)) ++ encoder->use_bios_dividers = true; ++ ++ panel_setup = radeon_bios32(dev_priv, lcd_info + 0x39); ++ encoder->lvds_gen_cntl = 0xff00; ++ if (panel_setup & 0x1) ++ encoder->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT; ++ ++ if ((panel_setup >> 4) & 0x1) ++ encoder->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE; ++ ++ switch ((panel_setup >> 8) & 0x7) { ++ case 0: ++ encoder->lvds_gen_cntl |= RADEON_LVDS_NO_FM; ++ break; ++ case 1: ++ encoder->lvds_gen_cntl |= RADEON_LVDS_2_GREY; ++ break; ++ case 2: ++ encoder->lvds_gen_cntl |= RADEON_LVDS_4_GREY; ++ 
break; ++ default: ++ break; ++ } ++ ++ if ((panel_setup >> 16) & 0x1) ++ encoder->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW; ++ ++ if ((panel_setup >> 17) & 0x1) ++ encoder->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW; ++ ++ if ((panel_setup >> 18) & 0x1) ++ encoder->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW; ++ ++ if ((panel_setup >> 23) & 0x1) ++ encoder->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL; ++ ++ encoder->lvds_gen_cntl |= (panel_setup & 0xf0000000); ++ ++ ++ for (i = 0; i < 32; i++) { ++ tmp = radeon_bios16(dev_priv, lcd_info + 64 + i * 2); ++ if (tmp == 0) break; ++ ++ if ((radeon_bios16(dev_priv, tmp) == encoder->panel_xres) && ++ (radeon_bios16(dev_priv, tmp + 2) == encoder->panel_yres)) { ++ encoder->hblank = (radeon_bios16(dev_priv, tmp + 17) - ++ radeon_bios16(dev_priv, tmp + 19)) * 8; ++ encoder->hoverplus = (radeon_bios16(dev_priv, tmp + 21) - ++ radeon_bios16(dev_priv, tmp + 19) - 1) * 8; ++ encoder->hsync_width = radeon_bios8(dev_priv, tmp + 23) * 8; ++ ++ encoder->vblank = (radeon_bios16(dev_priv, tmp + 24) - ++ radeon_bios16(dev_priv, tmp + 26)); ++ encoder->voverplus = ((radeon_bios16(dev_priv, tmp + 28) & 0x7ff) - ++ radeon_bios16(dev_priv, tmp + 26)); ++ encoder->vsync_width = ((radeon_bios16(dev_priv, tmp + 28) & 0xf800) >> 11); ++ encoder->dotclock = radeon_bios16(dev_priv, tmp + 9) * 10; ++ encoder->flags = 0; ++ } ++ } ++ return true; ++ } ++ DRM_INFO("No panel info found in BIOS\n"); ++ return false; ++ ++} ++ ++bool radeon_combios_get_tmds_info(struct radeon_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->base.dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint16_t tmds_info; ++ int i, n; ++ uint8_t ver; ++ ++ tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); ++ ++ if (tmds_info) { ++ ver = radeon_bios8(dev_priv, tmds_info); ++ DRM_INFO("DFP table revision: %d\n", ver); ++ if (ver == 3) { ++ n = radeon_bios8(dev_priv, tmds_info + 5) + 1; ++ if (n > 4) ++ n = 4; ++ for (i = 0; i < n; i++) { ++ 
encoder->tmds_pll[i].value = radeon_bios32(dev_priv, tmds_info + i * 10 + 0x08); ++ encoder->tmds_pll[i].freq = radeon_bios16(dev_priv, tmds_info + i * 10 + 0x10); ++ DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n", ++ encoder->tmds_pll[i].freq, ++ encoder->tmds_pll[i].value); ++ } ++ return true; ++ } else if (ver == 4) { ++ int stride = 0; ++ n = radeon_bios8(dev_priv, tmds_info + 5) + 1; ++ if (n > 4) ++ n = 4; ++ for (i = 0; i < n; i++) { ++ encoder->tmds_pll[i].value = radeon_bios32(dev_priv, tmds_info + stride + 0x08); ++ encoder->tmds_pll[i].freq = radeon_bios16(dev_priv, tmds_info + stride + 0x10); ++ if (i == 0) ++ stride += 10; ++ else ++ stride += 6; ++ DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n", ++ encoder->tmds_pll[i].freq, ++ encoder->tmds_pll[i].value); ++ } ++ return true; ++ } ++ } ++ ++ DRM_INFO("No TMDS info found in BIOS\n"); ++ return false; ++} ++ ++void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->base.dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint16_t ext_tmds_info; ++ uint8_t ver; ++ ++ ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); ++ if (ext_tmds_info) { ++ ver = radeon_bios8(dev_priv, ext_tmds_info); ++ DRM_INFO("External TMDS Table revision: %d\n", ver); ++ // TODO ++ } ++} ++ ++static void radeon_apply_legacy_quirks(struct drm_device *dev, int bios_index) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ ++ /* XPRESS DDC quirks */ ++ if ((dev_priv->chip_family == CHIP_RS400 || ++ dev_priv->chip_family == CHIP_RS480) && ++ mode_info->bios_connector[bios_index].ddc_i2c.mask_clk_reg == RADEON_GPIO_CRT2_DDC) { ++ mode_info->bios_connector[bios_index].ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); ++ } else if ((dev_priv->chip_family == CHIP_RS400 || ++ dev_priv->chip_family == CHIP_RS480) && ++ mode_info->bios_connector[bios_index].ddc_i2c.mask_clk_reg 
== RADEON_GPIO_MONID) { ++ mode_info->bios_connector[bios_index].ddc_i2c.valid = true; ++ mode_info->bios_connector[bios_index].ddc_i2c.mask_clk_mask = (0x20 << 8); ++ mode_info->bios_connector[bios_index].ddc_i2c.mask_data_mask = 0x80; ++ mode_info->bios_connector[bios_index].ddc_i2c.a_clk_mask = (0x20 << 8); ++ mode_info->bios_connector[bios_index].ddc_i2c.a_data_mask = 0x80; ++ mode_info->bios_connector[bios_index].ddc_i2c.put_clk_mask = (0x20 << 8); ++ mode_info->bios_connector[bios_index].ddc_i2c.put_data_mask = 0x80; ++ mode_info->bios_connector[bios_index].ddc_i2c.get_clk_mask = (0x20 << 8); ++ mode_info->bios_connector[bios_index].ddc_i2c.get_data_mask = 0x80; ++ mode_info->bios_connector[bios_index].ddc_i2c.mask_clk_reg = RADEON_GPIOPAD_MASK; ++ mode_info->bios_connector[bios_index].ddc_i2c.mask_data_reg = RADEON_GPIOPAD_MASK; ++ mode_info->bios_connector[bios_index].ddc_i2c.a_clk_reg = RADEON_GPIOPAD_A; ++ mode_info->bios_connector[bios_index].ddc_i2c.a_data_reg = RADEON_GPIOPAD_A; ++ mode_info->bios_connector[bios_index].ddc_i2c.put_clk_reg = RADEON_GPIOPAD_EN; ++ mode_info->bios_connector[bios_index].ddc_i2c.put_data_reg = RADEON_GPIOPAD_EN; ++ mode_info->bios_connector[bios_index].ddc_i2c.get_clk_reg = RADEON_LCD_GPIO_Y_REG; ++ mode_info->bios_connector[bios_index].ddc_i2c.get_data_reg = RADEON_LCD_GPIO_Y_REG; ++ } ++ ++ /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, ++ one with VGA DDC and one with CRT2 DDC. 
- kill the CRT2 DDC one */ ++ if (dev->pdev->device == 0x515e && ++ dev->pdev->subsystem_vendor == 0x1014) { ++ if (mode_info->bios_connector[bios_index].connector_type == CONNECTOR_VGA && ++ mode_info->bios_connector[bios_index].ddc_i2c.mask_clk_reg == RADEON_GPIO_CRT2_DDC) { ++ mode_info->bios_connector[bios_index].valid = false; ++ } ++ } ++ ++ /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */ ++ if (dev->pdev->device == 0x5159 && ++ dev->pdev->subsystem_vendor == 0x1002 && ++ dev->pdev->subsystem_device == 0x013a) { ++ if (mode_info->bios_connector[bios_index].connector_type == CONNECTOR_DVI_I) ++ mode_info->bios_connector[bios_index].connector_type = CONNECTOR_VGA; ++ ++ } ++ ++ /* X300 card with extra non-existent DVI port */ ++ if (dev->pdev->device == 0x5B60 && ++ dev->pdev->subsystem_vendor == 0x17af && ++ dev->pdev->subsystem_device == 0x201e && ++ bios_index == 2) { ++ if (mode_info->bios_connector[bios_index].connector_type == CONNECTOR_DVI_I) ++ mode_info->bios_connector[bios_index].valid = false; ++ } ++ ++} ++ ++bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ uint32_t conn_info, entry; ++ uint16_t tmp; ++ enum radeon_combios_ddc ddc_type; ++ enum radeon_combios_connector connector_type; ++ int i; ++ ++ DRM_DEBUG("\n"); ++ conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE); ++ if (conn_info) { ++ for (i = 0; i < 4; i++) { ++ entry = conn_info + 2 + i * 2; ++ ++ if (!radeon_bios16(dev_priv, entry)) ++ break; ++ ++ mode_info->bios_connector[i].valid = true; ++ ++ tmp = radeon_bios16(dev_priv, entry); ++ ++ connector_type = (tmp >> 12) & 0xf; ++ mode_info->bios_connector[i].connector_type = connector_type; ++ ++ switch(connector_type) { ++ case CONNECTOR_PROPRIETARY_LEGACY: ++ mode_info->bios_connector[i].connector_type = CONNECTOR_DVI_D; ++ break; ++ case 
CONNECTOR_CRT_LEGACY: ++ mode_info->bios_connector[i].connector_type = CONNECTOR_VGA; ++ break; ++ case CONNECTOR_DVI_I_LEGACY: ++ mode_info->bios_connector[i].connector_type = CONNECTOR_DVI_I; ++ break; ++ case CONNECTOR_DVI_D_LEGACY: ++ mode_info->bios_connector[i].connector_type = CONNECTOR_DVI_D; ++ break; ++ case CONNECTOR_CTV_LEGACY: ++ mode_info->bios_connector[i].connector_type = CONNECTOR_CTV; ++ break; ++ case CONNECTOR_STV_LEGACY: ++ mode_info->bios_connector[i].connector_type = CONNECTOR_STV; ++ break; ++ default: ++ DRM_ERROR("Unknown connector type: %d\n", connector_type); ++ mode_info->bios_connector[i].valid = false; ++ break; ++ } ++ ++ mode_info->bios_connector[i].ddc_i2c.valid = false; ++ ++ ddc_type = (tmp >> 8) & 0xf; ++ switch (ddc_type) { ++ case DDC_MONID: ++ mode_info->bios_connector[i].ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); ++ break; ++ case DDC_DVI: ++ mode_info->bios_connector[i].ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); ++ break; ++ case DDC_VGA: ++ mode_info->bios_connector[i].ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); ++ break; ++ case DDC_CRT2: ++ mode_info->bios_connector[i].ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); ++ break; ++ default: ++ break; ++ } ++ ++ if (tmp & 0x1) ++ mode_info->bios_connector[i].dac_type = DAC_TVDAC; ++ else ++ mode_info->bios_connector[i].dac_type = DAC_PRIMARY; ++ ++ if ((dev_priv->chip_family == CHIP_RS300) || ++ (dev_priv->chip_family == CHIP_RS400) || ++ (dev_priv->chip_family == CHIP_RS480)) ++ mode_info->bios_connector[i].dac_type = DAC_TVDAC; ++ ++ if ((tmp >> 4) & 0x1) ++ mode_info->bios_connector[i].tmds_type = TMDS_EXT; ++ else ++ mode_info->bios_connector[i].tmds_type = TMDS_INT; ++ ++ radeon_apply_legacy_quirks(dev, i); ++ } ++ } else { ++ uint16_t tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); ++ if (tmds_info) { ++ DRM_DEBUG("Found DFP table, assuming DVI connector\n"); ++ ++ mode_info->bios_connector[0].valid = true; ++ 
mode_info->bios_connector[0].connector_type = CONNECTOR_DVI_I; ++ mode_info->bios_connector[0].dac_type = DAC_PRIMARY; ++ mode_info->bios_connector[0].tmds_type = TMDS_INT; ++ mode_info->bios_connector[0].ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); ++ } else { ++ DRM_DEBUG("No connector info found\n"); ++ return false; ++ } ++ } ++ ++ if (dev_priv->flags & RADEON_IS_MOBILITY || ++ dev_priv->chip_family == CHIP_RS400 || ++ dev_priv->chip_family == CHIP_RS480) { ++ uint16_t lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); ++ if (lcd_info) { ++ uint16_t lcd_ddc_info = combios_get_table_offset(dev, COMBIOS_LCD_DDC_INFO_TABLE); ++ ++ mode_info->bios_connector[4].valid = true; ++ mode_info->bios_connector[4].connector_type = CONNECTOR_LVDS; ++ mode_info->bios_connector[4].dac_type = DAC_NONE; ++ mode_info->bios_connector[4].tmds_type = TMDS_NONE; ++ mode_info->bios_connector[4].ddc_i2c.valid = false; ++ ++ if (lcd_ddc_info) { ++ ddc_type = radeon_bios8(dev_priv, lcd_ddc_info + 2); ++ switch(ddc_type) { ++ case DDC_MONID: ++ mode_info->bios_connector[4].ddc_i2c = ++ combios_setup_i2c_bus(RADEON_GPIO_MONID); ++ break; ++ case DDC_DVI: ++ mode_info->bios_connector[4].ddc_i2c = ++ combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); ++ break; ++ case DDC_VGA: ++ mode_info->bios_connector[4].ddc_i2c = ++ combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); ++ break; ++ case DDC_CRT2: ++ mode_info->bios_connector[4].ddc_i2c = ++ combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); ++ break; ++ case DDC_LCD: ++ mode_info->bios_connector[4].ddc_i2c = ++ combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK); ++ mode_info->bios_connector[4].ddc_i2c.mask_clk_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 3); ++ mode_info->bios_connector[4].ddc_i2c.mask_data_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 7); ++ mode_info->bios_connector[4].ddc_i2c.a_clk_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 3); ++ mode_info->bios_connector[4].ddc_i2c.a_data_mask = ++ radeon_bios32(dev_priv, 
lcd_ddc_info + 7); ++ mode_info->bios_connector[4].ddc_i2c.put_clk_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 3); ++ mode_info->bios_connector[4].ddc_i2c.put_data_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 7); ++ mode_info->bios_connector[4].ddc_i2c.get_clk_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 3); ++ mode_info->bios_connector[4].ddc_i2c.get_data_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 7); ++ break; ++ case DDC_GPIO: ++ mode_info->bios_connector[4].ddc_i2c = ++ combios_setup_i2c_bus(RADEON_MDGPIO_EN_REG); ++ mode_info->bios_connector[4].ddc_i2c.mask_clk_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 3); ++ mode_info->bios_connector[4].ddc_i2c.mask_data_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 7); ++ mode_info->bios_connector[4].ddc_i2c.a_clk_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 3); ++ mode_info->bios_connector[4].ddc_i2c.a_data_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 7); ++ mode_info->bios_connector[4].ddc_i2c.put_clk_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 3); ++ mode_info->bios_connector[4].ddc_i2c.put_data_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 7); ++ mode_info->bios_connector[4].ddc_i2c.get_clk_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 3); ++ mode_info->bios_connector[4].ddc_i2c.get_data_mask = ++ radeon_bios32(dev_priv, lcd_ddc_info + 7); ++ break; ++ default: ++ break; ++ } ++ DRM_DEBUG("LCD DDC Info Table found!\n"); ++ } ++ } else ++ mode_info->bios_connector[4].ddc_i2c.valid = false; ++ } ++ ++ /* check TV table */ ++ if (dev_priv->chip_family != CHIP_R100 && ++ dev_priv->chip_family != CHIP_R200) { ++ uint32_t tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); ++ if (tv_info) { ++ if (radeon_bios8(dev_priv, tv_info + 6) == 'T') { ++ mode_info->bios_connector[5].valid = true; ++ mode_info->bios_connector[5].connector_type = CONNECTOR_DIN; ++ mode_info->bios_connector[5].dac_type = DAC_TVDAC; ++ mode_info->bios_connector[5].tmds_type = TMDS_NONE; ++ 
mode_info->bios_connector[5].ddc_i2c.valid = false; ++ } ++ } ++ } ++ ++ ++ DRM_DEBUG("BIOS Connector table\n"); ++ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { ++ if (!mode_info->bios_connector[i].valid) ++ continue; ++ ++ DRM_DEBUG("Port %d: ddc_type 0x%x, dac_type %d, tmds_type %d, connector type %d, hpd_mask %d\n", ++ i, mode_info->bios_connector[i].ddc_i2c.mask_clk_reg, ++ mode_info->bios_connector[i].dac_type, ++ mode_info->bios_connector[i].tmds_type, ++ mode_info->bios_connector[i].connector_type, ++ mode_info->bios_connector[i].hpd_mask); ++ } ++ ++ return true; ++} ++ ++static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ ++ if (offset) { ++ while (radeon_bios16(dev_priv, offset)) { ++ uint16_t cmd = ((radeon_bios16(dev_priv, offset) & 0xe000) >> 13); ++ uint32_t addr = (radeon_bios16(dev_priv, offset) & 0x1fff); ++ uint32_t val, and_mask, or_mask; ++ uint32_t tmp; ++ ++ offset += 2; ++ switch (cmd) { ++ case 0: ++ val = radeon_bios32(dev_priv, offset); ++ offset += 4; ++ RADEON_WRITE(addr, val); ++ break; ++ case 1: ++ val = radeon_bios32(dev_priv, offset); ++ offset += 4; ++ RADEON_WRITE(addr, val); ++ break; ++ case 2: ++ and_mask = radeon_bios32(dev_priv, offset); ++ offset += 4; ++ or_mask = radeon_bios32(dev_priv, offset); ++ offset += 4; ++ tmp = RADEON_READ(addr); ++ tmp &= and_mask; ++ tmp |= or_mask; ++ RADEON_WRITE(addr, tmp); ++ break; ++ case 3: ++ and_mask = radeon_bios32(dev_priv, offset); ++ offset += 4; ++ or_mask = radeon_bios32(dev_priv, offset); ++ offset += 4; ++ tmp = RADEON_READ(addr); ++ tmp &= and_mask; ++ tmp |= or_mask; ++ RADEON_WRITE(addr, tmp); ++ break; ++ case 4: ++ val = radeon_bios16(dev_priv, offset); ++ offset += 2; ++ udelay(val); ++ break; ++ case 5: ++ val = radeon_bios16(dev_priv, offset); ++ offset += 2; ++ switch (addr) { ++ case 8: ++ while (val--) { ++ if (!(RADEON_READ_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL) & ++ 
RADEON_MC_BUSY)) ++ break; ++ } ++ break; ++ case 9: ++ while (val--) { ++ if ((RADEON_READ(RADEON_MC_STATUS) & ++ RADEON_MC_IDLE)) ++ break; ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ default: ++ break; ++ } ++ } ++ } ++} ++ ++static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ ++ if (offset) { ++ while (radeon_bios8(dev_priv, offset)) { ++ uint8_t cmd = ((radeon_bios8(dev_priv, offset) & 0xc0) >> 6); ++ uint8_t addr = (radeon_bios8(dev_priv, offset) & 0x3f); ++ uint32_t val, shift, tmp; ++ uint32_t and_mask, or_mask; ++ ++ offset++; ++ switch (cmd) { ++ case 0: ++ val = radeon_bios32(dev_priv, offset); ++ offset += 4; ++ RADEON_WRITE_PLL(dev_priv, addr, val); ++ break; ++ case 1: ++ shift = radeon_bios8(dev_priv, offset) * 8; ++ offset++; ++ and_mask = radeon_bios8(dev_priv, offset) << shift; ++ and_mask |= ~(0xff << shift); ++ offset++; ++ or_mask = radeon_bios8(dev_priv, offset) << shift; ++ offset++; ++ tmp = RADEON_READ_PLL(dev_priv, addr); ++ tmp &= and_mask; ++ tmp |= or_mask; ++ RADEON_WRITE_PLL(dev_priv, addr, tmp); ++ break; ++ case 2: ++ case 3: ++ tmp = 1000; ++ switch (addr) { ++ case 1: ++ udelay(150); ++ break; ++ case 2: ++ udelay(1000); ++ break; ++ case 3: ++ while (tmp--) { ++ if (!(RADEON_READ_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL) & ++ RADEON_MC_BUSY)) ++ break; ++ } ++ break; ++ case 4: ++ while (tmp--) { ++ if (RADEON_READ_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL) & ++ RADEON_DLL_READY) ++ break; ++ } ++ break; ++ case 5: ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL); ++ if (tmp & RADEON_CG_NO1_DEBUG_0) { ++#if 0 ++ uint32_t mclk_cntl = RADEON_READ_PLL(RADEON_MCLK_CNTL); ++ mclk_cntl &= 0xffff0000; ++ //mclk_cntl |= 0x00001111; /* ??? 
*/ ++ RADEON_WRITE_PLL(dev_priv, RADEON_MCLK_CNTL, mclk_cntl); ++ udelay(10000); ++#endif ++ RADEON_WRITE_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL, ++ tmp & ~RADEON_CG_NO1_DEBUG_0); ++ udelay(10000); ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ default: ++ break; ++ } ++ } ++ } ++} ++ ++static void combios_parse_ram_reset_table(struct drm_device *dev, uint16_t offset) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ if (offset) { ++ uint8_t val = radeon_bios8(dev_priv, offset); ++ while (val != 0xff) { ++ offset++; ++ ++ if (val == 0x0f) { ++ uint32_t channel_complete_mask; ++ ++ if (radeon_is_r300(dev_priv)) ++ channel_complete_mask = R300_MEM_PWRUP_COMPLETE; ++ else ++ channel_complete_mask = RADEON_MEM_PWRUP_COMPLETE; ++ tmp = 20000; ++ while (tmp--) { ++ if ((RADEON_READ(RADEON_MEM_STR_CNTL) & ++ channel_complete_mask) == ++ channel_complete_mask) ++ break; ++ } ++ } else { ++ uint32_t or_mask = radeon_bios16(dev_priv, offset); ++ offset += 2; ++ ++ tmp = RADEON_READ(RADEON_MEM_SDRAM_MODE_REG); ++ tmp &= RADEON_SDRAM_MODE_MASK; ++ tmp |= or_mask; ++ RADEON_WRITE(RADEON_MEM_SDRAM_MODE_REG, tmp); ++ ++ or_mask = val << 24; ++ tmp = RADEON_READ(RADEON_MEM_SDRAM_MODE_REG); ++ tmp &= RADEON_B3MEM_RESET_MASK; ++ tmp |= or_mask; ++ RADEON_WRITE(RADEON_MEM_SDRAM_MODE_REG, tmp); ++ } ++ val = radeon_bios8(dev_priv, offset); ++ } ++ } ++} ++ ++void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable) ++{ ++ uint16_t dyn_clk_info = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); ++ ++ if (dyn_clk_info) ++ combios_parse_pll_table(dev, dyn_clk_info); ++} ++ ++void radeon_combios_asic_init(struct drm_device *dev) ++{ ++ uint16_t table; ++ ++ /* ASIC INIT 1 */ ++ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE); ++ if (table) ++ combios_parse_mmio_table(dev, table); ++ ++ /* PLL INIT */ ++ table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE); ++ if (table) ++ combios_parse_pll_table(dev, 
table); ++ ++ /* ASIC INIT 2 */ ++ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE); ++ if (table) ++ combios_parse_mmio_table(dev, table); ++ ++ /* ASIC INIT 4 */ ++ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE); ++ if (table) ++ combios_parse_mmio_table(dev, table); ++ ++ /* RAM RESET */ ++ table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE); ++ if (table) ++ combios_parse_ram_reset_table(dev, table); ++ ++ /* ASIC INIT 3 */ ++ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE); ++ if (table) ++ combios_parse_mmio_table(dev, table); ++ ++ /* DYN CLK 1 */ ++ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); ++ if (table) ++ combios_parse_pll_table(dev, table); ++ ++ /* ASIC INIT 5 */ ++ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_5_TABLE); ++ if (table) ++ combios_parse_mmio_table(dev, table); ++ ++} ++ ++void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch; ++ ++ bios_0_scratch = RADEON_READ(RADEON_BIOS_0_SCRATCH); ++ bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ //bios_7_scratch = RADEON_READ(RADEON_BIOS_7_SCRATCH); ++ ++ /* let the bios control the backlight */ ++ bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN; ++ ++ /* tell the bios not to handle mode switching */ ++ bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS | ++ RADEON_ACC_MODE_CHANGE); ++ ++ /* tell the bios a driver is loaded */ ++ //bios_7_scratch |= RADEON_DRV_LOADED; ++ ++ RADEON_WRITE(RADEON_BIOS_0_SCRATCH, bios_0_scratch); ++ RADEON_WRITE(RADEON_BIOS_6_SCRATCH, bios_6_scratch); ++ //RADEON_WRITE(RADEON_BIOS_7_SCRATCH, bios_7_scratch); ++} ++ ++void ++radeon_combios_output_lock(struct drm_encoder *encoder, bool lock) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t bios_6_scratch; ++ ++ 
bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ ++ if (lock) ++ bios_6_scratch |= RADEON_DRIVER_CRITICAL; ++ else ++ bios_6_scratch &= ~RADEON_DRIVER_CRITICAL; ++ ++ RADEON_WRITE(RADEON_BIOS_6_SCRATCH, bios_6_scratch); ++} +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +new file mode 100644 +index 0000000..be1dbae +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -0,0 +1,392 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "drm_edid.h" ++#include "drm_crtc_helper.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) ++{ ++ int enc_id = connector->encoder_ids[0]; ++ struct drm_mode_object *obj; ++ struct drm_encoder *encoder; ++ ++ /* pick the encoder ids */ ++ if (enc_id) { ++ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); ++ if (!obj) ++ return NULL; ++ encoder = obj_to_encoder(obj); ++ return encoder; ++ } ++ return NULL; ++} ++ ++static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ struct drm_display_mode *mode = NULL; ++ ++ if (radeon_encoder->panel_xres != 0 && ++ radeon_encoder->panel_yres != 0 && ++ radeon_encoder->dotclock != 0) { ++ mode = drm_mode_create(dev); ++ ++ mode->hdisplay = radeon_encoder->panel_xres; ++ mode->vdisplay = radeon_encoder->panel_yres; ++ ++ mode->htotal = mode->hdisplay + radeon_encoder->hblank; ++ mode->hsync_start = mode->hdisplay + radeon_encoder->hoverplus; ++ mode->hsync_end = mode->hsync_start + radeon_encoder->hsync_width; ++ mode->vtotal = mode->vdisplay + radeon_encoder->vblank; ++ mode->vsync_start = mode->vdisplay + radeon_encoder->voverplus; ++ mode->vsync_end = mode->vsync_start + radeon_encoder->vsync_width; ++ mode->clock = radeon_encoder->dotclock; ++ mode->flags = 0; ++ ++ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; ++ ++ DRM_DEBUG("Adding native panel mode %dx%d\n", ++ radeon_encoder->panel_xres, radeon_encoder->panel_yres); ++ } ++ return mode; ++} ++ ++int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property, ++ uint64_t val) ++{ ++ struct drm_device *dev = connector->dev; ++ ++ if (property == dev->mode_config.dpms_property) { ++ 
if (val > 3) ++ return -EINVAL; ++ ++ drm_helper_set_connector_dpms(connector, val); ++ ++ } ++ return 0; ++} ++ ++ ++static int radeon_lvds_get_modes(struct drm_connector *connector) ++{ ++ struct radeon_connector *radeon_connector = to_radeon_connector(connector); ++ struct drm_encoder *encoder; ++ int ret = 0; ++ struct edid *edid; ++ struct drm_display_mode *mode; ++ ++ if (radeon_connector->ddc_bus) { ++ radeon_i2c_do_lock(radeon_connector, 1); ++ edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); ++ radeon_i2c_do_lock(radeon_connector, 0); ++ if (edid) { ++ drm_mode_connector_update_edid_property(&radeon_connector->base, edid); ++ ret = drm_add_edid_modes(&radeon_connector->base, edid); ++ kfree(edid); ++ if (ret == 0) ++ goto native; ++ return ret; ++ } ++ } ++ ++native: ++ encoder = radeon_best_single_encoder(connector); ++ if (!encoder) ++ return 0; ++ ++ /* we have no EDID modes */ ++ mode = radeon_fp_native_mode(encoder); ++ if (mode) { ++ ret = 1; ++ drm_mode_probed_add(connector, mode); ++ } ++ return ret; ++} ++ ++static int radeon_lvds_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ return MODE_OK; ++} ++ ++static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) ++{ ++ // check acpi lid status ??? 
++ return connector_status_connected; ++} ++ ++static void radeon_connector_destroy(struct drm_connector *connector) ++{ ++ struct radeon_connector *radeon_connector = to_radeon_connector(connector); ++ ++ if (radeon_connector->ddc_bus) ++ radeon_i2c_destroy(radeon_connector->ddc_bus); ++ drm_sysfs_connector_remove(connector); ++ drm_connector_cleanup(connector); ++ kfree(connector); ++} ++ ++struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = { ++ .get_modes = radeon_lvds_get_modes, ++ .mode_valid = radeon_lvds_mode_valid, ++ .best_encoder = radeon_best_single_encoder, ++}; ++ ++struct drm_connector_funcs radeon_lvds_connector_funcs = { ++ .detect = radeon_lvds_detect, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .destroy = radeon_connector_destroy, ++ .set_property = radeon_connector_set_property, ++}; ++ ++static int radeon_vga_get_modes(struct drm_connector *connector) ++{ ++ struct radeon_connector *radeon_connector = to_radeon_connector(connector); ++ int ret; ++ ++ ret = radeon_ddc_get_modes(radeon_connector); ++ ++ return ret; ++} ++ ++static int radeon_vga_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ ++ return MODE_OK; ++} ++ ++static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector) ++{ ++ struct radeon_connector *radeon_connector = to_radeon_connector(connector); ++ struct drm_encoder *encoder; ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ bool ret; ++ ++ radeon_i2c_do_lock(radeon_connector, 1); ++ ret = radeon_ddc_probe(radeon_connector); ++ radeon_i2c_do_lock(radeon_connector, 0); ++ if (ret) ++ return connector_status_connected; ++ ++ /* if EDID fails to a load detect */ ++ encoder = radeon_best_single_encoder(connector); ++ if (!encoder) ++ return connector_status_disconnected; ++ ++ encoder_funcs = encoder->helper_private; ++ return encoder_funcs->detect(encoder, connector); ++} ++ ++struct drm_connector_helper_funcs 
radeon_vga_connector_helper_funcs = { ++ .get_modes = radeon_vga_get_modes, ++ .mode_valid = radeon_vga_mode_valid, ++ .best_encoder = radeon_best_single_encoder, ++}; ++ ++struct drm_connector_funcs radeon_vga_connector_funcs = { ++ .detect = radeon_vga_detect, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .destroy = radeon_connector_destroy, ++ .set_property = radeon_connector_set_property, ++}; ++ ++ ++static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) ++{ ++ struct radeon_connector *radeon_connector = to_radeon_connector(connector); ++ struct drm_encoder *encoder; ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ struct drm_mode_object *obj; ++ int i; ++ enum drm_connector_status ret; ++ bool dret; ++ ++ radeon_i2c_do_lock(radeon_connector, 1); ++ dret = radeon_ddc_probe(radeon_connector); ++ radeon_i2c_do_lock(radeon_connector, 0); ++ if (dret) ++ return connector_status_connected; ++ ++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { ++ if (connector->encoder_ids[i] == 0) ++ break; ++ ++ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); ++ if (!obj) ++ continue; ++ ++ encoder = obj_to_encoder(obj); ++ ++ encoder_funcs = encoder->helper_private; ++ if (encoder_funcs->detect) { ++ ret = encoder_funcs->detect(encoder, connector); ++ if (ret == connector_status_connected) { ++ radeon_connector->use_digital = 0; ++ return ret; ++ } ++ } ++ } ++ return connector_status_disconnected; ++} ++ ++/* okay need to be smart in here about which encoder to pick */ ++struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) ++{ ++ int enc_id = connector->encoder_ids[0]; ++ struct radeon_connector *radeon_connector = to_radeon_connector(connector); ++ struct drm_mode_object *obj; ++ struct drm_encoder *encoder; ++ int i; ++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { ++ if (connector->encoder_ids[i] == 0) ++ break; ++ ++ obj = 
drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); ++ if (!obj) ++ continue; ++ ++ encoder = obj_to_encoder(obj); ++ ++ if (radeon_connector->use_digital) { ++ if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) ++ return encoder; ++ } else { ++ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC || ++ encoder->encoder_type == DRM_MODE_ENCODER_TVDAC) ++ return encoder; ++ } ++ } ++ ++ /* see if we have a default encoder TODO */ ++ ++ /* then check use digitial */ ++ /* pick the first one */ ++ if (enc_id) { ++ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); ++ if (!obj) ++ return NULL; ++ encoder = obj_to_encoder(obj); ++ return encoder; ++ } ++ return NULL; ++} ++ ++struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { ++ .get_modes = radeon_vga_get_modes, ++ .mode_valid = radeon_vga_mode_valid, ++ .best_encoder = radeon_dvi_encoder, ++}; ++ ++struct drm_connector_funcs radeon_dvi_connector_funcs = { ++ .detect = radeon_dvi_detect, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .set_property = radeon_connector_set_property, ++ .destroy = radeon_connector_destroy, ++}; ++ ++ ++static struct connector_funcs { ++ int conn_id; ++ struct drm_connector_funcs *connector_funcs; ++ struct drm_connector_helper_funcs *helper_funcs; ++ int conn_type; ++ char *i2c_id; ++} connector_fns[] = { ++ { CONNECTOR_NONE, NULL, NULL, DRM_MODE_CONNECTOR_Unknown }, ++ { CONNECTOR_VGA, &radeon_vga_connector_funcs, &radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA , "VGA"}, ++ { CONNECTOR_LVDS, &radeon_lvds_connector_funcs, &radeon_lvds_connector_helper_funcs, DRM_MODE_CONNECTOR_LVDS, "LVDS" }, ++ { CONNECTOR_DVI_A, &radeon_vga_connector_funcs, &radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_DVIA, "DVI" }, ++ { CONNECTOR_DVI_I, &radeon_dvi_connector_funcs, &radeon_dvi_connector_helper_funcs, DRM_MODE_CONNECTOR_DVII, "DVI" }, ++ { CONNECTOR_DVI_D, &radeon_dvi_connector_funcs, 
&radeon_dvi_connector_helper_funcs, DRM_MODE_CONNECTOR_DVID, "DVI" }, ++ { CONNECTOR_HDMI_TYPE_A, &radeon_dvi_connector_funcs, &radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_HDMIA, "HDMI" }, ++ { CONNECTOR_HDMI_TYPE_B, &radeon_dvi_connector_funcs, &radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_HDMIB, "HDMI" }, ++#if 0 ++ { CONNECTOR_HDMI_TYPE_A, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ { CONNECTOR_DVI_D, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ ++ { CONNECTOR_STV, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ { CONNECTOR_CTV, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ { CONNECTOR_DIGITAL, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ { CONNECTOR_SCART, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ ++ { CONNECTOR_HDMI_TYPE_B, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ { CONNECTOR_HDMI_TYPE_B, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ { CONNECTOR_HDMI_TYPE_B, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ { CONNECTOR_DIN, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++ { CONNECTOR_DISPLAY_PORT, radeon_vga_connector_funcs, radeon_vga_connector_helper_funcs, DRM_MODE_CONNECTOR_VGA }, ++#endif ++}; ++ ++struct drm_connector *radeon_connector_add(struct drm_device *dev, int bios_index) ++{ ++ struct radeon_connector *radeon_connector; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct drm_connector *connector; ++ int table_idx; ++ ++ for (table_idx = 0; table_idx < ARRAY_SIZE(connector_fns); table_idx++) { ++ 
if (connector_fns[table_idx].conn_id == mode_info->bios_connector[bios_index].connector_type) ++ break; ++ } ++ ++ if (table_idx == ARRAY_SIZE(connector_fns)) ++ return NULL; ++ ++ radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); ++ if (!radeon_connector) { ++ return NULL; ++ } ++ ++ connector = &radeon_connector->base; ++ ++ drm_connector_init(dev, &radeon_connector->base, connector_fns[table_idx].connector_funcs, ++ connector_fns[table_idx].conn_type); ++ ++ drm_connector_helper_add(&radeon_connector->base, connector_fns[table_idx].helper_funcs); ++ ++ if (mode_info->bios_connector[bios_index].ddc_i2c.valid) { ++ radeon_connector->ddc_bus = radeon_i2c_create(dev, &mode_info->bios_connector[bios_index].ddc_i2c, ++ connector_fns[table_idx].i2c_id); ++ if (!radeon_connector->ddc_bus) ++ goto failed; ++ } ++ ++ drm_sysfs_connector_add(connector); ++ return connector; ++ ++ ++failed: ++ if (radeon_connector->ddc_bus) ++ radeon_i2c_destroy(radeon_connector->ddc_bus); ++ drm_connector_cleanup(connector); ++ kfree(connector); ++ return NULL; ++} +diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c +index 63212d7..a400cb9 100644 +--- a/drivers/gpu/drm/radeon/radeon_cp.c ++++ b/drivers/gpu/drm/radeon/radeon_cp.c +@@ -76,7 +76,24 @@ static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) + return RS690_READ_MCIND(dev_priv, addr); + else +- return RS480_READ_MCIND(dev_priv, addr); ++ return RS480_READ_MCIND(dev_priv, addr); ++} ++ ++u32 radeon_read_mc_reg(drm_radeon_private_t *dev_priv, int addr) ++{ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ return IGP_READ_MCIND(dev_priv, addr); ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) ++ return R500_READ_MCIND(dev_priv, addr); ++ return 0; ++} ++ ++void radeon_write_mc_reg(drm_radeon_private_t *dev_priv, u32 addr, u32 val) ++{ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) 
== CHIP_RS690) ++ IGP_WRITE_MCIND(addr, val); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) ++ R500_WRITE_MCIND(addr, val); + } + + u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) +@@ -87,39 +104,77 @@ u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) + else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) + return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) ++ return RADEON_READ(R700_MC_VM_FB_LOCATION); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) ++ return RADEON_READ(R600_MC_VM_FB_LOCATION); + else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) + return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); + else + return RADEON_READ(RADEON_MC_FB_LOCATION); + } + +-static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) ++void radeon_read_agp_location(drm_radeon_private_t *dev_priv, u32 *agp_lo, u32 *agp_hi) ++{ ++ if (dev_priv->chip_family == CHIP_RV770) { ++ *agp_lo = RADEON_READ(R600_MC_VM_AGP_BOT); ++ *agp_hi = RADEON_READ(R600_MC_VM_AGP_TOP); ++ } else if (dev_priv->chip_family == CHIP_R600) { ++ *agp_lo = RADEON_READ(R600_MC_VM_AGP_BOT); ++ *agp_hi = RADEON_READ(R600_MC_VM_AGP_TOP); ++ } else if (dev_priv->chip_family == CHIP_RV515) { ++ *agp_lo = radeon_read_mc_reg(dev_priv, RV515_MC_AGP_LOCATION); ++ *agp_hi = 0; ++ } else if (dev_priv->chip_family == CHIP_RS600) { ++ *agp_lo = 0; ++ *agp_hi = 0; ++ } else if (dev_priv->chip_family == CHIP_RS690 || ++ dev_priv->chip_family == CHIP_RS740) { ++ *agp_lo = radeon_read_mc_reg(dev_priv, RS690_MC_AGP_LOCATION); ++ *agp_hi = 0; ++ } else if (dev_priv->chip_family >= CHIP_R520) { ++ *agp_lo = radeon_read_mc_reg(dev_priv, R520_MC_AGP_LOCATION); ++ *agp_hi = 0; ++ } else { ++ *agp_lo = RADEON_READ(RADEON_MC_AGP_LOCATION); ++ *agp_hi = 0; ++ } ++} ++ ++void 
radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) + { + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) + R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); + else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) + RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) ++ RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) ++ RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc); + else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) + R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); + else + RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); + } + +-static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) ++void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc, u32 agp_loc_hi) + { + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) + R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); + else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) + RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); +- else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { ++ RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc); ++ RADEON_WRITE(R600_MC_VM_AGP_TOP, agp_loc_hi); ++ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) + R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); + else + RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); + } + +-static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) ++void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) + { + u32 agp_base_hi = upper_32_bits(agp_base); + u32 agp_base_lo = agp_base & 0xffffffff; +@@ -145,20 +200,129 @@ static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) + 
} + } + +-static int RADEON_READ_PLL(struct drm_device * dev, int addr) ++void radeon_enable_bm(struct drm_radeon_private *dev_priv) + { +- drm_radeon_private_t *dev_priv = dev->dev_private; ++ u32 tmp; ++ /* Turn on bus mastering */ ++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { ++ /* rs600/rs690/rs740 */ ++ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; ++ RADEON_WRITE(RADEON_BUS_CNTL, tmp); ++ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { ++ /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ ++ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; ++ RADEON_WRITE(RADEON_BUS_CNTL, tmp); ++ } /* PCIE cards appears to not need this */ ++} + +- RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f); +- return RADEON_READ(RADEON_CLOCK_CNTL_DATA); ++void radeon_pll_errata_after_index(struct drm_radeon_private *dev_priv) ++{ ++ if (!(dev_priv->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) ++ return; ++ ++ (void)RADEON_READ(RADEON_CLOCK_CNTL_DATA); ++ (void)RADEON_READ(RADEON_CRTC_GEN_CNTL); ++} ++ ++void radeon_pll_errata_after_data(struct drm_radeon_private *dev_priv) ++{ ++ /* This workarounds is necessary on RV100, RS100 and RS200 chips ++ * or the chip could hang on a subsequent access ++ */ ++ if (dev_priv->pll_errata & CHIP_ERRATA_PLL_DELAY) ++ udelay(5000); ++ ++ /* This function is required to workaround a hardware bug in some (all?) ++ * revisions of the R300. This workaround should be called after every ++ * CLOCK_CNTL_INDEX register access. If not, register reads afterward ++ * may not be correct. 
++ */ ++ if (dev_priv->pll_errata & CHIP_ERRATA_R300_CG) { ++ uint32_t save, tmp; ++ ++ save = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); ++ tmp = save & ~(0x3f | RADEON_PLL_WR_EN); ++ RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, tmp); ++ tmp = RADEON_READ(RADEON_CLOCK_CNTL_DATA); ++ RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, save); ++ } ++} ++ ++u32 RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr) ++{ ++ uint32_t data; ++ ++ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x3f); ++ radeon_pll_errata_after_index(dev_priv); ++ data = RADEON_READ(RADEON_CLOCK_CNTL_DATA); ++ radeon_pll_errata_after_data(dev_priv); ++ return data; ++} ++ ++void RADEON_WRITE_PLL(struct drm_radeon_private *dev_priv, int addr, uint32_t data) ++{ ++ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, ((addr & 0x3f) | RADEON_PLL_WR_EN)); ++ radeon_pll_errata_after_index(dev_priv); ++ RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, data); ++ radeon_pll_errata_after_data(dev_priv); + } + +-static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) ++u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) + { + RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); + return RADEON_READ(RADEON_PCIE_DATA); + } + ++/* ATOM accessor methods */ ++static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) ++{ ++ uint32_t ret = RADEON_READ_PLL(info->dev->dev_private, reg); ++ DRM_DEBUG("(%x) = %x\n", reg, ret); ++ return ret; ++} ++ ++static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) ++{ ++ DRM_DEBUG("(%x, %x)\n", reg, val); ++ RADEON_WRITE_PLL(info->dev->dev_private, reg, val); ++} ++ ++static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) ++{ ++ uint32_t ret = radeon_read_mc_reg(info->dev->dev_private, reg); ++ ++ /* DRM_DEBUG("(%x) = %x\n", reg, ret); */ ++ return ret; ++} ++ ++static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) ++{ ++ /* DRM_DEBUG("(%x, %x)\n", reg, val);*/ ++ radeon_write_mc_reg(info->dev->dev_private, reg, val); ++} ++ 
++static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) ++{ ++ drm_radeon_private_t *dev_priv = info->dev->dev_private; ++ ++ // DRM_DEBUG("(%x, %x)\n", reg*4, val); ++ RADEON_WRITE(reg*4, val); ++} ++ ++static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) ++{ ++ uint32_t ret; ++ drm_radeon_private_t *dev_priv = info->dev->dev_private; ++ ++ ret = RADEON_READ(reg*4); ++ // DRM_DEBUG("(%x) = %x\n", reg*4, ret); ++ return ret; ++} ++ + #if RADEON_FIFO_DEBUG + static void radeon_status(drm_radeon_private_t * dev_priv) + { +@@ -241,7 +405,7 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) + return -EBUSY; + } + +-static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) ++int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) + { + int i, ret; + +@@ -301,7 +465,7 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv) + } + + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { +- RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4)); ++ RADEON_WRITE_PLL(dev_priv, R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4)); + RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1)); + } + RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config); +@@ -407,7 +571,6 @@ static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) + DRM_DEBUG("\n"); + #if 0 + u32 tmp; +- + tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); + RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); + #endif +@@ -448,10 +611,15 @@ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) + BEGIN_RING(8); + /* isync can only be written through cp on r5xx write it here */ + OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0)); ++ if (dev_priv->chip_family > CHIP_RV280) ++ OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D | ++ RADEON_ISYNC_ANY3D_IDLE2D | ++ RADEON_ISYNC_WAIT_IDLEGUI | ++ dev_priv->mm_enabled ? 
0 : RADEON_ISYNC_CPSCRATCH_IDLEGUI); ++ else + OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D | + RADEON_ISYNC_ANY3D_IDLE2D | +- RADEON_ISYNC_WAIT_IDLEGUI | +- RADEON_ISYNC_CPSCRATCH_IDLEGUI); ++ RADEON_ISYNC_WAIT_IDLEGUI); + RADEON_PURGE_CACHE(); + RADEON_PURGE_ZCACHE(); + RADEON_WAIT_UNTIL_IDLE(); +@@ -502,15 +670,15 @@ static int radeon_do_engine_reset(struct drm_device * dev) + if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { + /* may need something similar for newer chips */ + clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); +- mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); +- +- RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | +- RADEON_FORCEON_MCLKA | +- RADEON_FORCEON_MCLKB | +- RADEON_FORCEON_YCLKA | +- RADEON_FORCEON_YCLKB | +- RADEON_FORCEON_MC | +- RADEON_FORCEON_AIC)); ++ mclk_cntl = RADEON_READ_PLL(dev_priv, RADEON_MCLK_CNTL); ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_MCLK_CNTL, (mclk_cntl | ++ RADEON_FORCEON_MCLKA | ++ RADEON_FORCEON_MCLKB | ++ RADEON_FORCEON_YCLKA | ++ RADEON_FORCEON_YCLKB | ++ RADEON_FORCEON_MC | ++ RADEON_FORCEON_AIC)); + } + + rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); +@@ -535,7 +703,7 @@ static int radeon_do_engine_reset(struct drm_device * dev) + RADEON_READ(RADEON_RBBM_SOFT_RESET); + + if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { +- RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); ++ RADEON_WRITE_PLL(dev_priv, RADEON_MCLK_CNTL, mclk_cntl); + RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); + RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); + } +@@ -551,7 +719,8 @@ static int radeon_do_engine_reset(struct drm_device * dev) + dev_priv->cp_running = 0; + + /* Reset any pending vertex, indirect buffers */ +- radeon_freelist_reset(dev); ++ if (dev->dma) ++ radeon_freelist_reset(dev); + + return 0; + } +@@ -560,7 +729,6 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, + drm_radeon_private_t * dev_priv) + { + u32 ring_start, cur_read_ptr; +- u32 tmp; + + /* Initialize 
the memory controller. With new memory map, the fb location + * is not changed, it should have been properly initialized already. Part +@@ -569,9 +737,13 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, + */ + if (!dev_priv->new_memmap) + radeon_write_fb_location(dev_priv, +- ((dev_priv->gart_vm_start - 1) & 0xffff0000) +- | (dev_priv->fb_location >> 16)); +- ++ ((dev_priv->gart_vm_start - 1) & 0xffff0000) ++ | (dev_priv->fb_location >> 16)); ++ ++ if (dev_priv->mm.ring.bo) { ++ ring_start = dev_priv->mm.ring.bo->offset + ++ dev_priv->gart_vm_start; ++ } else + #if __OS_HAS_AGP + if (dev_priv->flags & RADEON_IS_AGP) { + radeon_write_agp_base(dev_priv, dev->agp->base); +@@ -579,7 +751,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, + radeon_write_agp_location(dev_priv, + (((dev_priv->gart_vm_start - 1 + + dev_priv->gart_size) & 0xffff0000) | +- (dev_priv->gart_vm_start >> 16))); ++ (dev_priv->gart_vm_start >> 16)), 0); + + ring_start = (dev_priv->cp_ring->offset + - dev->agp->base +@@ -601,6 +773,12 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, + SET_RING_HEAD(dev_priv, cur_read_ptr); + dev_priv->ring.tail = cur_read_ptr; + ++ ++ if (dev_priv->mm.ring_read.bo) { ++ RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, ++ dev_priv->mm.ring_read.bo->offset + ++ dev_priv->gart_vm_start); ++ } else + #if __OS_HAS_AGP + if (dev_priv->flags & RADEON_IS_AGP) { + RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, +@@ -647,26 +825,21 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, + RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR) + + RADEON_SCRATCH_REG_OFFSET); + +- dev_priv->scratch = ((__volatile__ u32 *) +- dev_priv->ring_rptr->handle + +- (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); ++ if (dev_priv->mm.ring_read.bo) ++ dev_priv->scratch = ((__volatile__ u32 *) ++ dev_priv->mm.ring_read.kmap.virtual + ++ (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); ++ else ++ dev_priv->scratch = ((__volatile__ u32 *) ++ 
dev_priv->ring_rptr->handle + ++ (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); + +- RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); ++ if (dev_priv->chip_family >= CHIP_R300) ++ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7f); ++ else ++ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x1f); + +- /* Turn on bus mastering */ +- if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || +- ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { +- /* rs600/rs690/rs740 */ +- tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; +- RADEON_WRITE(RADEON_BUS_CNTL, tmp); +- } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) || +- ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || +- ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || +- ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { +- /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ +- tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; +- RADEON_WRITE(RADEON_BUS_CNTL, tmp); +- } /* PCIE cards appears to not need this */ ++ radeon_enable_bm(dev_priv); + + dev_priv->scratch[0] = 0; + RADEON_WRITE(RADEON_LAST_FRAME_REG, 0); +@@ -677,32 +850,54 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, + dev_priv->scratch[2] = 0; + RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); + ++ dev_priv->scratch[3] = 0; ++ RADEON_WRITE(RADEON_LAST_SWI_REG, 0); ++ ++ dev_priv->scratch[4] = 0; ++ RADEON_WRITE(RADEON_SCRATCH_REG4, 0); ++ ++ dev_priv->scratch[6] = 0; ++ RADEON_WRITE(RADEON_SCRATCH_REG6, 0); ++ + radeon_do_wait_for_idle(dev_priv); + + /* Sync everything up */ ++ if (dev_priv->chip_family > CHIP_RV280) { + RADEON_WRITE(RADEON_ISYNC_CNTL, + (RADEON_ISYNC_ANY2D_IDLE3D | + RADEON_ISYNC_ANY3D_IDLE2D | + RADEON_ISYNC_WAIT_IDLEGUI | + RADEON_ISYNC_CPSCRATCH_IDLEGUI)); +- ++ } else { ++ RADEON_WRITE(RADEON_ISYNC_CNTL, ++ (RADEON_ISYNC_ANY2D_IDLE3D | ++ RADEON_ISYNC_ANY3D_IDLE2D | ++ RADEON_ISYNC_WAIT_IDLEGUI)); ++ } + } + + static void radeon_test_writeback(drm_radeon_private_t * dev_priv) + { +- 
u32 tmp; ++ u32 tmp, scratch1_store; ++ void *ring_read_ptr; ++ ++ if (dev_priv->mm.ring_read.bo) ++ ring_read_ptr = dev_priv->mm.ring_read.kmap.virtual; ++ else ++ ring_read_ptr = dev_priv->ring_rptr->handle; + + /* Start with assuming that writeback doesn't work */ + dev_priv->writeback_works = 0; + ++ scratch1_store = RADEON_READ(RADEON_SCRATCH_REG1); + /* Writeback doesn't seem to work everywhere, test it here and possibly + * enable it if it appears to work + */ +- DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0); ++ writel(0, ring_read_ptr + RADEON_SCRATCHOFF(1)); + RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); + + for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { +- if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) == ++ if (readl(ring_read_ptr + RADEON_SCRATCHOFF(1)) == + 0xdeadbeef) + break; + DRM_UDELAY(1); +@@ -720,10 +915,12 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv) + DRM_INFO("writeback forced off\n"); + } + ++ /* write back previous value */ ++ RADEON_WRITE(RADEON_SCRATCH_REG1, scratch1_store); ++ + if (!dev_priv->writeback_works) { +- /* Disable writeback to avoid unnecessary bus master transfer */ +- RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | +- RADEON_RB_NO_UPDATE); ++ /* Disable writeback to avoid unnecessary bus master transfers */ ++ RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE); + RADEON_WRITE(RADEON_SCRATCH_UMSK, 0); + } + } +@@ -734,10 +931,25 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) + u32 temp; + + if (on) { ++ u32 size_reg; + DRM_DEBUG("programming igp gart %08X %08lX %08X\n", +- dev_priv->gart_vm_start, +- (long)dev_priv->gart_info.bus_addr, +- dev_priv->gart_size); ++ dev_priv->gart_vm_start, ++ (long)dev_priv->gart_info.bus_addr, ++ dev_priv->gart_size); ++ ++ switch(dev_priv->gart_size/(1024*1024)) { ++ case 32: size_reg = RS480_VA_SIZE_32MB; break; ++ case 64: size_reg = RS480_VA_SIZE_64MB; 
break; ++ case 128: size_reg = RS480_VA_SIZE_128MB; break; ++ case 256: size_reg = RS480_VA_SIZE_256MB; break; ++ case 512: size_reg = RS480_VA_SIZE_512MB; break; ++ case 1024: size_reg = RS480_VA_SIZE_1GB; break; ++ case 2048: size_reg = RS480_VA_SIZE_2GB; break; ++ default: ++ DRM_ERROR("Unable to use IGP GART table size %d\n", dev_priv->gart_info.table_size); ++ size_reg = RS480_VA_SIZE_32MB; ++ break; ++ } + + temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || +@@ -747,8 +959,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) + else + IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); + +- IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | +- RS480_VA_SIZE_32MB)); ++ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg)); + + temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID); + IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN | +@@ -764,24 +975,30 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) + IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) | + RS480_REQ_TYPE_SNOOP_DIS)); + +- radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start); ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { ++ IGP_WRITE_MCIND(RS690_MC_AGP_BASE, ++ (unsigned int)dev_priv->gart_vm_start); ++ IGP_WRITE_MCIND(RS690_MC_AGP_BASE_2, 0); ++ } else { ++ RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start); ++ RADEON_WRITE(RS480_AGP_BASE_2, 0); ++ } + +- dev_priv->gart_size = 32*1024*1024; +- temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & +- 0xffff0000) | (dev_priv->gart_vm_start >> 16)); ++ temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & ++ 0xffff0000) | (dev_priv->gart_vm_start >> 16)); + +- radeon_write_agp_location(dev_priv, temp); ++ radeon_write_agp_location(dev_priv, temp, 0); + + temp = IGP_READ_MCIND(dev_priv, 
RS480_AGP_ADDRESS_SPACE_SIZE); + IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | +- RS480_VA_SIZE_32MB)); ++ size_reg)); + + do { + temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); + if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) + break; + DRM_UDELAY(1); +- } while (1); ++ } while(1); + + IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, + RS480_GART_CACHE_INVALIDATE); +@@ -791,7 +1008,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) + if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) + break; + DRM_UDELAY(1); +- } while (1); ++ } while(1); + + IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); + } else { +@@ -818,7 +1035,7 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) + dev_priv->gart_vm_start + + dev_priv->gart_size - 1); + +- radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */ ++ radeon_write_agp_location(dev_priv, 0xffffffc0, 0); /* ?? */ + + RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, + RADEON_PCIE_TX_GART_EN); +@@ -829,7 +1046,7 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) + } + + /* Enable or disable PCI GART on the chip */ +-static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) ++void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) + { + u32 tmp; + +@@ -863,7 +1080,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) + + /* Turn off AGP aperture -- is this required for PCI GART? 
+ */ +- radeon_write_agp_location(dev_priv, 0xffffffc0); ++ radeon_write_agp_location(dev_priv, 0xffffffc0, 0); + RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ + } else { + RADEON_WRITE(RADEON_AIC_CNTL, +@@ -913,17 +1130,6 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, + */ + dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; + +- switch(init->func) { +- case RADEON_INIT_R200_CP: +- dev_priv->microcode_version = UCODE_R200; +- break; +- case RADEON_INIT_R300_CP: +- dev_priv->microcode_version = UCODE_R300; +- break; +- default: +- dev_priv->microcode_version = UCODE_R100; +- } +- + dev_priv->do_boxes = 0; + dev_priv->cp_mode = init->cp_mode; + +@@ -971,9 +1177,8 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, + */ + dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | + (dev_priv->color_fmt << 10) | +- (dev_priv->microcode_version == +- UCODE_R100 ? RADEON_ZBLOCK16 : 0)); +- ++ (dev_priv->chip_family < CHIP_R200 ? 
RADEON_ZBLOCK16 : 0)); ++ + dev_priv->depth_clear.rb3d_zstencilcntl = + (dev_priv->depth_fmt | + RADEON_Z_TEST_ALWAYS | +@@ -1150,8 +1355,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, + dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; + dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); + +- dev_priv->ring.fetch_size = /* init->fetch_size */ 32; +- dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); ++ dev_priv->ring.fetch_size_l2ow = 2; + dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; + + dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; +@@ -1166,28 +1370,41 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, + dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); + /* if we have an offset set from userspace */ + if (dev_priv->pcigart_offset_set) { +- dev_priv->gart_info.bus_addr = +- dev_priv->pcigart_offset + dev_priv->fb_location; +- dev_priv->gart_info.mapping.offset = +- dev_priv->pcigart_offset + dev_priv->fb_aper_offset; +- dev_priv->gart_info.mapping.size = +- dev_priv->gart_info.table_size; +- +- drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); +- dev_priv->gart_info.addr = +- dev_priv->gart_info.mapping.handle; +- +- if (dev_priv->flags & RADEON_IS_PCIE) +- dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; +- else +- dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; +- dev_priv->gart_info.gart_table_location = +- DRM_ATI_GART_FB; + +- DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", +- dev_priv->gart_info.addr, +- dev_priv->pcigart_offset); ++ /* if it came from userspace - remap it */ ++ if (dev_priv->pcigart_offset_set == 1) { ++ dev_priv->gart_info.bus_addr = ++ dev_priv->pcigart_offset + dev_priv->fb_location; ++ dev_priv->gart_info.mapping.offset = ++ dev_priv->pcigart_offset + dev_priv->fb_aper_offset; ++ dev_priv->gart_info.mapping.size = ++ dev_priv->gart_info.table_size; ++ ++ /* this is 
done by the mm now */ ++ drm_core_ioremap(&dev_priv->gart_info.mapping, dev); ++ dev_priv->gart_info.addr = ++ dev_priv->gart_info.mapping.handle; ++ ++ memset(dev_priv->gart_info.addr, 0, dev_priv->gart_info.table_size); ++ if (dev_priv->flags & RADEON_IS_PCIE) ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; ++ else ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; ++ dev_priv->gart_info.gart_table_location = ++ DRM_ATI_GART_FB; ++ ++ DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", ++ dev_priv->gart_info.addr, ++ dev_priv->pcigart_offset); ++ } + } else { ++ ++ if (dev_priv->flags & RADEON_IS_PCIE) { ++ DRM_ERROR ++ ("Cannot use PCI Express without GART in FB memory\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } + if (dev_priv->flags & RADEON_IS_IGPGART) + dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP; + else +@@ -1196,12 +1413,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, + DRM_ATI_GART_MAIN; + dev_priv->gart_info.addr = NULL; + dev_priv->gart_info.bus_addr = 0; +- if (dev_priv->flags & RADEON_IS_PCIE) { +- DRM_ERROR +- ("Cannot use PCI Express without GART in FB memory\n"); +- radeon_do_cleanup_cp(dev); +- return -EINVAL; +- } ++ + } + + if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { +@@ -1214,6 +1426,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, + radeon_set_pcigart(dev_priv, 1); + } + ++ /* Start with assuming that writeback doesn't work */ ++ dev_priv->writeback_works = 0; ++ + radeon_cp_load_microcode(dev_priv); + radeon_cp_init_ring_buffer(dev, dev_priv); + +@@ -1258,14 +1473,16 @@ static int radeon_do_cleanup_cp(struct drm_device * dev) + if (dev_priv->gart_info.bus_addr) { + /* Turn off PCI GART */ + radeon_set_pcigart(dev_priv, 0); +- if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) +- DRM_ERROR("failed to cleanup PCI GART!\n"); ++ drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info); + } + + if (dev_priv->gart_info.gart_table_location 
== DRM_ATI_GART_FB) + { +- drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); +- dev_priv->gart_info.addr = 0; ++ if (dev_priv->pcigart_offset_set == 1) { ++ drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); ++ dev_priv->gart_info.addr = NULL; ++ dev_priv->pcigart_offset_set = 0; ++ } + } + } + /* only clear to the start of flags */ +@@ -1317,6 +1534,10 @@ static int radeon_do_resume_cp(struct drm_device * dev) + int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + drm_radeon_init_t *init = data; ++ ++ /* on a modesetting driver ignore this stuff */ ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; + + LOCK_TEST_WITH_RETURN(dev, file_priv); + +@@ -1340,6 +1561,9 @@ int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_pr + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG("\n"); + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + LOCK_TEST_WITH_RETURN(dev, file_priv); + + if (dev_priv->cp_running) { +@@ -1367,6 +1591,9 @@ int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_pri + int ret; + DRM_DEBUG("\n"); + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + LOCK_TEST_WITH_RETURN(dev, file_priv); + + if (!dev_priv->cp_running) +@@ -1405,6 +1632,9 @@ void radeon_do_release(struct drm_device * dev) + drm_radeon_private_t *dev_priv = dev->dev_private; + int i, ret; + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return; ++ + if (dev_priv) { + if (dev_priv->cp_running) { + /* Stop the cp */ +@@ -1438,6 +1668,9 @@ void radeon_do_release(struct drm_device * dev) + radeon_mem_takedown(&(dev_priv->gart_heap)); + radeon_mem_takedown(&(dev_priv->fb_heap)); + ++ if (dev_priv->user_mm_enable) ++ radeon_gem_mm_fini(dev); ++ + /* deallocate kernel resources */ + radeon_do_cleanup_cp(dev); + } +@@ -1450,6 +1683,9 @@ int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_pr + 
drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG("\n"); + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + LOCK_TEST_WITH_RETURN(dev, file_priv); + + if (!dev_priv) { +@@ -1470,7 +1706,9 @@ int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_pri + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG("\n"); + +- LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ LOCK_TEST_WITH_RETURN(dev, file_priv); + + return radeon_do_cp_idle(dev_priv); + } +@@ -1480,6 +1718,9 @@ int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_pri + int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + return radeon_do_resume_cp(dev); + } + +@@ -1487,6 +1728,9 @@ int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *fil + { + DRM_DEBUG("\n"); + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + LOCK_TEST_WITH_RETURN(dev, file_priv); + + return radeon_do_engine_reset(dev); +@@ -1709,6 +1953,821 @@ int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_ + return ret; + } + ++static void radeon_get_vram_type(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ if (dev_priv->flags & RADEON_IS_IGP || (dev_priv->chip_family >= CHIP_R300)) ++ dev_priv->is_ddr = true; ++ else if (RADEON_READ(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) ++ dev_priv->is_ddr = true; ++ else ++ dev_priv->is_ddr = false; ++ ++ if ((dev_priv->chip_family >= CHIP_R600) && ++ (dev_priv->chip_family <= CHIP_RV635)) { ++ int chansize; ++ ++ tmp = RADEON_READ(R600_RAMCFG); ++ if (tmp & R600_CHANSIZE_OVERRIDE) ++ chansize = 16; ++ else if (tmp & R600_CHANSIZE) ++ chansize = 64; ++ else ++ chansize = 32; ++ ++ if (dev_priv->chip_family == CHIP_R600) ++ 
dev_priv->ram_width = 8 * chansize; ++ else if (dev_priv->chip_family == CHIP_RV670) ++ dev_priv->ram_width = 4 * chansize; ++ else if ((dev_priv->chip_family == CHIP_RV610) || ++ (dev_priv->chip_family == CHIP_RV620)) ++ dev_priv->ram_width = chansize; ++ else if ((dev_priv->chip_family == CHIP_RV630) || ++ (dev_priv->chip_family == CHIP_RV635)) ++ dev_priv->ram_width = 2 * chansize; ++ } else if (dev_priv->chip_family == CHIP_RV515) { ++ tmp = radeon_read_mc_reg(dev_priv, RV515_MC_CNTL); ++ tmp &= RV515_MEM_NUM_CHANNELS_MASK; ++ switch (tmp) { ++ case 0: dev_priv->ram_width = 64; break; ++ case 1: dev_priv->ram_width = 128; break; ++ default: dev_priv->ram_width = 128; break; ++ } ++ } else if ((dev_priv->chip_family >= CHIP_R520) && ++ (dev_priv->chip_family <= CHIP_RV570)) { ++ tmp = radeon_read_mc_reg(dev_priv, R520_MC_CNTL0); ++ switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) { ++ case 0: dev_priv->ram_width = 32; break; ++ case 1: dev_priv->ram_width = 64; break; ++ case 2: dev_priv->ram_width = 128; break; ++ case 3: dev_priv->ram_width = 256; break; ++ default: dev_priv->ram_width = 128; break; ++ } ++ } else if ((dev_priv->chip_family == CHIP_RV100) || ++ (dev_priv->chip_family == CHIP_RS100) || ++ (dev_priv->chip_family == CHIP_RS200)) { ++ tmp = RADEON_READ(RADEON_MEM_CNTL); ++ if (tmp & RV100_HALF_MODE) ++ dev_priv->ram_width = 32; ++ else ++ dev_priv->ram_width = 64; ++ ++ if (dev_priv->flags & RADEON_SINGLE_CRTC) { ++ dev_priv->ram_width /= 4; ++ dev_priv->is_ddr = true; ++ } ++ } else if (dev_priv->chip_family <= CHIP_RV280) { ++ tmp = RADEON_READ(RADEON_MEM_CNTL); ++ if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) ++ dev_priv->ram_width = 128; ++ else ++ dev_priv->ram_width = 64; ++ } else { ++ /* newer IGPs */ ++ dev_priv->ram_width = 128; ++ } ++ DRM_DEBUG("RAM width %d bits %cDR\n", dev_priv->ram_width, dev_priv->is_ddr ? 
'D' : 'S'); ++} ++ ++static void radeon_force_some_clocks(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_CNTL); ++ tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP; ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_CNTL, tmp); ++} ++ ++static void radeon_set_dynamic_clock(struct drm_device *dev, int mode) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ switch(mode) { ++ case 0: ++ if (dev_priv->flags & RADEON_SINGLE_CRTC) { ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_CNTL); ++ tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP | ++ RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP | ++ RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE | ++ RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP | ++ RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB | ++ RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM | ++ RADEON_SCLK_FORCE_RB); ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_CNTL, tmp); ++ } else if (dev_priv->chip_family == CHIP_RV350) { ++ /* for RV350/M10, no delays are required. 
*/ ++ tmp = RADEON_READ_PLL(dev_priv, R300_SCLK_CNTL2); ++ tmp |= (R300_SCLK_FORCE_TCL | ++ R300_SCLK_FORCE_GA | ++ R300_SCLK_FORCE_CBA); ++ RADEON_WRITE_PLL(dev_priv, R300_SCLK_CNTL2, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_CNTL); ++ tmp &= ~(RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP | ++ RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 | ++ RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 | ++ R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT | ++ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR | ++ R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX | ++ R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK | ++ R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0); ++ tmp |= RADEON_DYN_STOP_LAT_MASK; ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_CNTL, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_MORE_CNTL); ++ tmp &= ~RADEON_SCLK_MORE_FORCEON; ++ tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT; ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_MORE_CNTL, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_VCLK_ECP_CNTL); ++ tmp |= (RADEON_PIXCLK_ALWAYS_ONb | ++ RADEON_PIXCLK_DAC_ALWAYS_ONb); ++ RADEON_WRITE_PLL(dev_priv, RADEON_VCLK_ECP_CNTL, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_PIXCLKS_CNTL); ++ tmp |= (RADEON_PIX2CLK_ALWAYS_ONb | ++ RADEON_PIX2CLK_DAC_ALWAYS_ONb | ++ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb | ++ R300_DVOCLK_ALWAYS_ONb | ++ RADEON_PIXCLK_BLEND_ALWAYS_ONb | ++ RADEON_PIXCLK_GV_ALWAYS_ONb | ++ R300_PIXCLK_DVO_ALWAYS_ONb | ++ RADEON_PIXCLK_LVDS_ALWAYS_ONb | ++ RADEON_PIXCLK_TMDS_ALWAYS_ONb | ++ R300_PIXCLK_TRANS_ALWAYS_ONb | ++ R300_PIXCLK_TVO_ALWAYS_ONb | ++ R300_P2G2CLK_ALWAYS_ONb | ++ R300_P2G2CLK_ALWAYS_ONb); ++ RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, tmp); ++ } else { ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_CNTL); ++ tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2); ++ tmp |= RADEON_SCLK_FORCE_SE; ++ ++ if ( dev_priv->flags & RADEON_SINGLE_CRTC ) { ++ tmp |= ( RADEON_SCLK_FORCE_RB | ++ RADEON_SCLK_FORCE_TDM | ++ RADEON_SCLK_FORCE_TAM | ++ 
RADEON_SCLK_FORCE_PB | ++ RADEON_SCLK_FORCE_RE | ++ RADEON_SCLK_FORCE_VIP | ++ RADEON_SCLK_FORCE_IDCT | ++ RADEON_SCLK_FORCE_TOP | ++ RADEON_SCLK_FORCE_DISP1 | ++ RADEON_SCLK_FORCE_DISP2 | ++ RADEON_SCLK_FORCE_HDP ); ++ } else if ((dev_priv->chip_family == CHIP_R300) || ++ (dev_priv->chip_family == CHIP_R350)) { ++ tmp |= ( RADEON_SCLK_FORCE_HDP | ++ RADEON_SCLK_FORCE_DISP1 | ++ RADEON_SCLK_FORCE_DISP2 | ++ RADEON_SCLK_FORCE_TOP | ++ RADEON_SCLK_FORCE_IDCT | ++ RADEON_SCLK_FORCE_VIP); ++ } ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_CNTL, tmp); ++ ++ udelay(16000); ++ ++ if ((dev_priv->chip_family == CHIP_R300) || ++ (dev_priv->chip_family == CHIP_R350)) { ++ tmp = RADEON_READ_PLL(dev_priv, R300_SCLK_CNTL2); ++ tmp |= ( R300_SCLK_FORCE_TCL | ++ R300_SCLK_FORCE_GA | ++ R300_SCLK_FORCE_CBA); ++ RADEON_WRITE_PLL(dev_priv, R300_SCLK_CNTL2, tmp); ++ udelay(16000); ++ } ++ ++ if (dev_priv->flags & RADEON_IS_IGP) { ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_MCLK_CNTL); ++ tmp &= ~(RADEON_FORCEON_MCLKA | ++ RADEON_FORCEON_YCLKA); ++ RADEON_WRITE_PLL(dev_priv, RADEON_MCLK_CNTL, tmp); ++ udelay(16000); ++ } ++ ++ if ((dev_priv->chip_family == CHIP_RV200) || ++ (dev_priv->chip_family == CHIP_RV250) || ++ (dev_priv->chip_family == CHIP_RV280)) { ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_MORE_CNTL); ++ tmp |= RADEON_SCLK_MORE_FORCEON; ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_MORE_CNTL, tmp); ++ udelay(16000); ++ } ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_PIXCLKS_CNTL); ++ tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb | ++ RADEON_PIX2CLK_DAC_ALWAYS_ONb | ++ RADEON_PIXCLK_BLEND_ALWAYS_ONb | ++ RADEON_PIXCLK_GV_ALWAYS_ONb | ++ RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb | ++ RADEON_PIXCLK_LVDS_ALWAYS_ONb | ++ RADEON_PIXCLK_TMDS_ALWAYS_ONb); ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, tmp); ++ udelay(16000); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_VCLK_ECP_CNTL); ++ tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb | ++ RADEON_PIXCLK_DAC_ALWAYS_ONb); ++ RADEON_WRITE_PLL(dev_priv, 
RADEON_VCLK_ECP_CNTL, tmp); ++ } ++ DRM_DEBUG("Dynamic Clock Scaling Disabled\n"); ++ break; ++ case 1: ++ if (dev_priv->flags & RADEON_SINGLE_CRTC) { ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_CNTL); ++ if ((RADEON_READ(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) > ++ RADEON_CFG_ATI_REV_A13) { ++ tmp &= ~(RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_RB); ++ } ++ tmp &= ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 | ++ RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE | ++ RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE | ++ RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM | ++ RADEON_SCLK_FORCE_TDM); ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_CNTL, tmp); ++ } else if ((dev_priv->chip_family == CHIP_R300) || ++ (dev_priv->chip_family == CHIP_R350) || ++ (dev_priv->chip_family == CHIP_RV350)) { ++ if (dev_priv->chip_family == CHIP_RV350) { ++ tmp = RADEON_READ_PLL(dev_priv, R300_SCLK_CNTL2); ++ tmp &= ~(R300_SCLK_FORCE_TCL | ++ R300_SCLK_FORCE_GA | ++ R300_SCLK_FORCE_CBA); ++ tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT | ++ R300_SCLK_GA_MAX_DYN_STOP_LAT | ++ R300_SCLK_CBA_MAX_DYN_STOP_LAT); ++ RADEON_WRITE_PLL(dev_priv, R300_SCLK_CNTL2, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_CNTL); ++ tmp &= ~(RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP | ++ RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 | ++ RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 | ++ R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT | ++ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR | ++ R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX | ++ R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK | ++ R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0); ++ tmp |= RADEON_DYN_STOP_LAT_MASK; ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_CNTL, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_MORE_CNTL); ++ tmp &= ~RADEON_SCLK_MORE_FORCEON; ++ tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT; ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_MORE_CNTL, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_VCLK_ECP_CNTL); ++ tmp |= 
(RADEON_PIXCLK_ALWAYS_ONb | ++ RADEON_PIXCLK_DAC_ALWAYS_ONb); ++ RADEON_WRITE_PLL(dev_priv, RADEON_VCLK_ECP_CNTL, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_PIXCLKS_CNTL); ++ tmp |= (RADEON_PIX2CLK_ALWAYS_ONb | ++ RADEON_PIX2CLK_DAC_ALWAYS_ONb | ++ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb | ++ R300_DVOCLK_ALWAYS_ONb | ++ RADEON_PIXCLK_BLEND_ALWAYS_ONb | ++ RADEON_PIXCLK_GV_ALWAYS_ONb | ++ R300_PIXCLK_DVO_ALWAYS_ONb | ++ RADEON_PIXCLK_LVDS_ALWAYS_ONb | ++ RADEON_PIXCLK_TMDS_ALWAYS_ONb | ++ R300_PIXCLK_TRANS_ALWAYS_ONb | ++ R300_PIXCLK_TVO_ALWAYS_ONb | ++ R300_P2G2CLK_ALWAYS_ONb | ++ R300_P2G2CLK_ALWAYS_ONb); ++ RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_MCLK_MISC); ++ tmp |= (RADEON_MC_MCLK_DYN_ENABLE | ++ RADEON_IO_MCLK_DYN_ENABLE); ++ RADEON_WRITE_PLL(dev_priv, RADEON_MCLK_MISC, tmp); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_MCLK_CNTL); ++ tmp |= (RADEON_FORCEON_MCLKA | ++ RADEON_FORCEON_MCLKB); ++ ++ tmp &= ~(RADEON_FORCEON_YCLKA | ++ RADEON_FORCEON_YCLKB | ++ RADEON_FORCEON_MC); ++ ++ /* Some releases of vbios have set DISABLE_MC_MCLKA ++ and DISABLE_MC_MCLKB bits in the vbios table. Setting these ++ bits will cause H/W hang when reading video memory with dynamic clocking ++ enabled. 
*/ ++ if ((tmp & R300_DISABLE_MC_MCLKA) && ++ (tmp & R300_DISABLE_MC_MCLKB)) { ++ /* If both bits are set, then check the active channels */ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_MCLK_CNTL); ++ if (dev_priv->ram_width == 64) { ++ if (RADEON_READ(RADEON_MEM_CNTL) & R300_MEM_USE_CD_CH_ONLY) ++ tmp &= ~R300_DISABLE_MC_MCLKB; ++ else ++ tmp &= ~R300_DISABLE_MC_MCLKA; ++ } else { ++ tmp &= ~(R300_DISABLE_MC_MCLKA | ++ R300_DISABLE_MC_MCLKB); ++ } ++ } ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_MCLK_CNTL, tmp); ++ } else { ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_CNTL); ++ tmp &= ~(R300_SCLK_FORCE_VAP); ++ tmp |= RADEON_SCLK_FORCE_CP; ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_CNTL, tmp); ++ udelay(15000); ++ ++ tmp = RADEON_READ_PLL(dev_priv, R300_SCLK_CNTL2); ++ tmp &= ~(R300_SCLK_FORCE_TCL | ++ R300_SCLK_FORCE_GA | ++ R300_SCLK_FORCE_CBA); ++ RADEON_WRITE_PLL(dev_priv, R300_SCLK_CNTL2, tmp); ++ } ++ } else { ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL); ++ tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK | ++ RADEON_DISP_DYN_STOP_LAT_MASK | ++ RADEON_DYN_STOP_MODE_MASK); ++ ++ tmp |= (RADEON_ENGIN_DYNCLK_MODE | ++ (0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT)); ++ RADEON_WRITE_PLL(dev_priv, RADEON_CLK_PWRMGT_CNTL, tmp); ++ udelay(15000); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_CLK_PIN_CNTL); ++ tmp |= RADEON_SCLK_DYN_START_CNTL; ++ RADEON_WRITE_PLL(dev_priv, RADEON_CLK_PIN_CNTL, tmp); ++ udelay(15000); ++ ++ /* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200 ++ to lockup randomly, leave them as set by BIOS. 
++ */ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_CNTL); ++ /*tmp &= RADEON_SCLK_SRC_SEL_MASK;*/ ++ tmp &= ~RADEON_SCLK_FORCEON_MASK; ++ ++ /*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300*/ ++ if (((dev_priv->chip_family == CHIP_RV250) && ++ ((RADEON_READ(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) < ++ RADEON_CFG_ATI_REV_A13)) || ++ ((dev_priv->chip_family == CHIP_RV100) && ++ ((RADEON_READ(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) <= ++ RADEON_CFG_ATI_REV_A13))){ ++ tmp |= RADEON_SCLK_FORCE_CP; ++ tmp |= RADEON_SCLK_FORCE_VIP; ++ } ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_CNTL, tmp); ++ ++ if ((dev_priv->chip_family == CHIP_RV200) || ++ (dev_priv->chip_family == CHIP_RV250) || ++ (dev_priv->chip_family == CHIP_RV280)) { ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_SCLK_MORE_CNTL); ++ tmp &= ~RADEON_SCLK_MORE_FORCEON; ++ ++ /* RV200::A11 A12 RV250::A11 A12 */ ++ if (((dev_priv->chip_family == CHIP_RV200) || ++ (dev_priv->chip_family == CHIP_RV250)) && ++ ((RADEON_READ(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) < ++ RADEON_CFG_ATI_REV_A13)) { ++ tmp |= RADEON_SCLK_MORE_FORCEON; ++ } ++ RADEON_WRITE_PLL(dev_priv, RADEON_SCLK_MORE_CNTL, tmp); ++ udelay(15000); ++ } ++ ++ /* RV200::A11 A12, RV250::A11 A12 */ ++ if (((dev_priv->chip_family == CHIP_RV200) || ++ (dev_priv->chip_family == CHIP_RV250)) && ++ ((RADEON_READ(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) < ++ RADEON_CFG_ATI_REV_A13)) { ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_PLL_PWRMGT_CNTL); ++ tmp |= RADEON_TCL_BYPASS_DISABLE; ++ RADEON_WRITE_PLL(dev_priv, RADEON_PLL_PWRMGT_CNTL, tmp); ++ } ++ udelay(15000); ++ ++ /*enable dynamic mode for display clocks (PIXCLK and PIX2CLK)*/ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_PIXCLKS_CNTL); ++ tmp |= (RADEON_PIX2CLK_ALWAYS_ONb | ++ RADEON_PIX2CLK_DAC_ALWAYS_ONb | ++ RADEON_PIXCLK_BLEND_ALWAYS_ONb | ++ RADEON_PIXCLK_GV_ALWAYS_ONb | ++ RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb | ++ RADEON_PIXCLK_LVDS_ALWAYS_ONb | ++ 
RADEON_PIXCLK_TMDS_ALWAYS_ONb); ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, tmp); ++ udelay(15000); ++ ++ tmp = RADEON_READ_PLL(dev_priv, RADEON_VCLK_ECP_CNTL); ++ tmp |= (RADEON_PIXCLK_ALWAYS_ONb | ++ RADEON_PIXCLK_DAC_ALWAYS_ONb); ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_VCLK_ECP_CNTL, tmp); ++ udelay(15000); ++ } ++ DRM_DEBUG("Dynamic Clock Scaling Enabled\n"); ++ break; ++ default: ++ break; ++ } ++ ++} ++ ++int radeon_modeset_cp_suspend(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int ret; ++ ++ ret = radeon_do_cp_idle(dev_priv); ++ if (ret) ++ DRM_ERROR("failed to idle CP on suspend\n"); ++ ++ radeon_do_cp_stop(dev_priv); ++ radeon_do_engine_reset(dev); ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ } else { ++ radeon_set_pcigart(dev_priv, 0); ++ } ++ ++ return 0; ++} ++ ++int radeon_modeset_cp_resume(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ radeon_do_wait_for_idle(dev_priv); ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ /* Turn off PCI GART */ ++ radeon_set_pcigart(dev_priv, 0); ++ } else ++#endif ++ { ++ /* Turn on PCI GART */ ++ radeon_set_pcigart(dev_priv, 1); ++ } ++ radeon_gart_flush(dev); ++ ++ radeon_cp_load_microcode(dev_priv); ++ radeon_cp_init_ring_buffer(dev, dev_priv); ++ ++ radeon_do_engine_reset(dev); ++ ++ radeon_test_writeback(dev_priv); ++ ++ radeon_do_cp_start(dev_priv); ++ return 0; ++} ++ ++int radeon_modeset_cp_init(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ /* allocate a ring and ring rptr bits from GART space */ ++ /* these are allocated in GEM files */ ++ ++ /* Start with assuming that writeback doesn't work */ ++ dev_priv->writeback_works = 0; ++ ++ if (dev_priv->chip_family > CHIP_R600) ++ return 0; ++ ++ dev_priv->usec_timeout = RADEON_DEFAULT_CP_TIMEOUT; ++ dev_priv->ring.size = RADEON_DEFAULT_RING_SIZE; ++ dev_priv->cp_mode = RADEON_CSQ_PRIBM_INDBM; ++ ++ dev_priv->ring.start 
= (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual; ++ dev_priv->ring.end = (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual + ++ dev_priv->ring.size / sizeof(u32); ++ dev_priv->ring.size_l2qw = drm_order(dev_priv->ring.size / 8); ++ dev_priv->ring.rptr_update = 4096; ++ dev_priv->ring.rptr_update_l2qw = drm_order(4096 / 8); ++ dev_priv->ring.fetch_size_l2ow = 2; /* do what tcore does */ ++ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; ++ dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; ++ ++ dev_priv->new_memmap = true; ++ ++ r300_init_reg_flags(dev); ++ ++ /* turn off HDP read cache for now */ ++ RADEON_WRITE(RADEON_HOST_PATH_CNTL, RADEON_READ(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS); ++ ++ return radeon_modeset_cp_resume(dev); ++} ++ ++static bool radeon_read_bios(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ u8 __iomem *bios; ++ size_t size; ++ ++ bios = pci_map_rom(dev->pdev, &size); ++ if (!bios) { ++ return false; ++ } ++ ++ if (size == 0) ++ goto fail; ++ ++ if (bios[0] != 0x55 || bios[1] != 0xaa) ++ goto fail; ++ ++ dev_priv->bios = kmalloc(size, GFP_KERNEL); ++ if (!dev_priv->bios) { ++ pci_unmap_rom(dev->pdev, bios); ++ return -1; ++ } ++ ++ memcpy(dev_priv->bios, bios, size); ++ ++ pci_unmap_rom(dev->pdev, bios); ++ ++ return true; ++fail: ++ pci_unmap_rom(dev->pdev, bios); ++ kfree(dev_priv->bios); ++ dev_priv->bios = NULL; ++ return false; ++} ++ ++static bool radeon_read_disabled_bios(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ bool ret; ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ uint32_t viph_control = RADEON_READ(RADEON_VIPH_CONTROL); ++ uint32_t bus_cntl = RADEON_READ(RADEON_BUS_CNTL); ++ uint32_t d1vga_control = RADEON_READ(AVIVO_D1VGA_CONTROL); ++ uint32_t d2vga_control = RADEON_READ(AVIVO_D2VGA_CONTROL); ++ uint32_t vga_render_control = RADEON_READ(AVIVO_VGA_RENDER_CONTROL); ++ uint32_t rom_cntl = 
RADEON_READ(R600_ROM_CNTL); ++ uint32_t general_pwrmgt = RADEON_READ(R600_GENERAL_PWRMGT); ++ uint32_t low_vid_lower_gpio_cntl = RADEON_READ(R600_LOW_VID_LOWER_GPIO_CNTL); ++ uint32_t medium_vid_lower_gpio_cntl = RADEON_READ(R600_MEDIUM_VID_LOWER_GPIO_CNTL); ++ uint32_t high_vid_lower_gpio_cntl = RADEON_READ(R600_HIGH_VID_LOWER_GPIO_CNTL); ++ uint32_t ctxsw_vid_lower_gpio_cntl = RADEON_READ(R600_CTXSW_VID_LOWER_GPIO_CNTL); ++ uint32_t lower_gpio_enable = RADEON_READ(R600_LOWER_GPIO_ENABLE); ++ ++ /* disable VIP */ ++ RADEON_WRITE(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); ++ ++ /* enable the rom */ ++ RADEON_WRITE(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); ++ ++ /* Disable VGA mode */ ++ RADEON_WRITE(AVIVO_D1VGA_CONTROL, (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | ++ AVIVO_DVGA_CONTROL_TIMING_SELECT))); ++ RADEON_WRITE(AVIVO_D2VGA_CONTROL, (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | ++ AVIVO_DVGA_CONTROL_TIMING_SELECT))); ++ RADEON_WRITE(AVIVO_VGA_RENDER_CONTROL, (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); ++ ++ RADEON_WRITE(R600_ROM_CNTL, ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) | ++ (1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) | ++ R600_SCK_OVERWRITE)); ++ ++ RADEON_WRITE(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS)); ++ RADEON_WRITE(R600_LOW_VID_LOWER_GPIO_CNTL, (low_vid_lower_gpio_cntl & ~0x400)); ++ RADEON_WRITE(R600_MEDIUM_VID_LOWER_GPIO_CNTL, (medium_vid_lower_gpio_cntl & ~0x400)); ++ RADEON_WRITE(R600_HIGH_VID_LOWER_GPIO_CNTL, (high_vid_lower_gpio_cntl & ~0x400)); ++ RADEON_WRITE(R600_CTXSW_VID_LOWER_GPIO_CNTL, (ctxsw_vid_lower_gpio_cntl & ~0x400)); ++ RADEON_WRITE(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400)); ++ ++ ret = radeon_read_bios(dev); ++ ++ /* restore regs */ ++ RADEON_WRITE(RADEON_VIPH_CONTROL, viph_control); ++ RADEON_WRITE(RADEON_BUS_CNTL, bus_cntl); ++ RADEON_WRITE(AVIVO_D1VGA_CONTROL, d1vga_control); ++ RADEON_WRITE(AVIVO_D2VGA_CONTROL, d2vga_control); ++ 
RADEON_WRITE(AVIVO_VGA_RENDER_CONTROL, vga_render_control); ++ RADEON_WRITE(R600_ROM_CNTL, rom_cntl); ++ RADEON_WRITE(R600_GENERAL_PWRMGT, general_pwrmgt); ++ RADEON_WRITE(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl); ++ RADEON_WRITE(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl); ++ RADEON_WRITE(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl); ++ RADEON_WRITE(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl); ++ RADEON_WRITE(R600_LOWER_GPIO_ENABLE, lower_gpio_enable); ++ } else if (dev_priv->chip_family >= CHIP_RS600) { ++ uint32_t seprom_cntl1 = RADEON_READ(RADEON_SEPROM_CNTL1); ++ uint32_t viph_control = RADEON_READ(RADEON_VIPH_CONTROL); ++ uint32_t bus_cntl = RADEON_READ(RADEON_BUS_CNTL); ++ uint32_t d1vga_control = RADEON_READ(AVIVO_D1VGA_CONTROL); ++ uint32_t d2vga_control = RADEON_READ(AVIVO_D2VGA_CONTROL); ++ uint32_t vga_render_control = RADEON_READ(AVIVO_VGA_RENDER_CONTROL); ++ uint32_t gpiopad_a = RADEON_READ(RADEON_GPIOPAD_A); ++ uint32_t gpiopad_en = RADEON_READ(RADEON_GPIOPAD_EN); ++ uint32_t gpiopad_mask = RADEON_READ(RADEON_GPIOPAD_MASK); ++ ++ RADEON_WRITE(RADEON_SEPROM_CNTL1, ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) | ++ (0xc << RADEON_SCK_PRESCALE_SHIFT))); ++ ++ RADEON_WRITE(RADEON_GPIOPAD_A, 0); ++ RADEON_WRITE(RADEON_GPIOPAD_EN, 0); ++ RADEON_WRITE(RADEON_GPIOPAD_MASK, 0); ++ ++ /* disable VIP */ ++ RADEON_WRITE(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); ++ ++ /* enable the rom */ ++ RADEON_WRITE(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); ++ ++ /* Disable VGA mode */ ++ RADEON_WRITE(AVIVO_D1VGA_CONTROL, (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | ++ AVIVO_DVGA_CONTROL_TIMING_SELECT))); ++ RADEON_WRITE(AVIVO_D2VGA_CONTROL, (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | ++ AVIVO_DVGA_CONTROL_TIMING_SELECT))); ++ RADEON_WRITE(AVIVO_VGA_RENDER_CONTROL, (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); ++ ++ ret = radeon_read_bios(dev); ++ ++ /* restore regs */ 
++ RADEON_WRITE(RADEON_SEPROM_CNTL1, seprom_cntl1); ++ RADEON_WRITE(RADEON_VIPH_CONTROL, viph_control); ++ RADEON_WRITE(RADEON_BUS_CNTL, bus_cntl); ++ RADEON_WRITE(AVIVO_D1VGA_CONTROL, d1vga_control); ++ RADEON_WRITE(AVIVO_D2VGA_CONTROL, d2vga_control); ++ RADEON_WRITE(AVIVO_VGA_RENDER_CONTROL, vga_render_control); ++ RADEON_WRITE(RADEON_GPIOPAD_A, gpiopad_a); ++ RADEON_WRITE(RADEON_GPIOPAD_EN, gpiopad_en); ++ RADEON_WRITE(RADEON_GPIOPAD_MASK, gpiopad_mask); ++ ++ } else { ++ uint32_t seprom_cntl1 = RADEON_READ(RADEON_SEPROM_CNTL1); ++ uint32_t viph_control = RADEON_READ(RADEON_VIPH_CONTROL); ++ uint32_t bus_cntl = RADEON_READ(RADEON_BUS_CNTL); ++ uint32_t crtc_gen_cntl = RADEON_READ(RADEON_CRTC_GEN_CNTL); ++ uint32_t crtc2_gen_cntl = 0; ++ uint32_t crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL); ++ uint32_t fp2_gen_cntl = 0; ++ ++ if (dev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) ++ fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); ++ ++ if (!(dev_priv->flags & RADEON_SINGLE_CRTC)) ++ crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL); ++ ++ RADEON_WRITE(RADEON_SEPROM_CNTL1, ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) | ++ (0xc << RADEON_SCK_PRESCALE_SHIFT))); ++ ++ /* disable VIP */ ++ RADEON_WRITE(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); ++ ++ /* enable the rom */ ++ RADEON_WRITE(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); ++ ++ /* Turn off mem requests and CRTC for both controllers */ ++ RADEON_WRITE(RADEON_CRTC_GEN_CNTL, ((crtc_gen_cntl & ~RADEON_CRTC_EN) | ++ (RADEON_CRTC_DISP_REQ_EN_B | ++ RADEON_CRTC_EXT_DISP_EN))); ++ if (!(dev_priv->flags & RADEON_SINGLE_CRTC)) ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) | ++ RADEON_CRTC2_DISP_REQ_EN_B)); ++ ++ /* Turn off CRTC */ ++ RADEON_WRITE(RADEON_CRTC_EXT_CNTL, ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) | ++ (RADEON_CRTC_SYNC_TRISTAT | ++ RADEON_CRTC_DISPLAY_DIS))); ++ ++ if (dev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) ++ 
RADEON_WRITE(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON)); ++ ++ ret = radeon_read_bios(dev); ++ ++ /* restore regs */ ++ RADEON_WRITE(RADEON_SEPROM_CNTL1, seprom_cntl1); ++ RADEON_WRITE(RADEON_VIPH_CONTROL, viph_control); ++ RADEON_WRITE(RADEON_BUS_CNTL, bus_cntl); ++ RADEON_WRITE(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); ++ if (!(dev_priv->flags & RADEON_SINGLE_CRTC)) ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); ++ RADEON_WRITE(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); ++ if (dev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) ++ RADEON_WRITE(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); ++ } ++ return ret; ++} ++ ++ ++static bool radeon_get_bios(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int ret = 0; ++ uint16_t tmp; ++ ++ ret = radeon_read_bios(dev); ++ if (ret == false) ++ ret = radeon_read_disabled_bios(dev); ++ ++ if (ret == false || !dev_priv->bios) { ++ DRM_ERROR("Unable to locate a BIOS ROM\n"); ++ return false; ++ } ++ ++ if (dev_priv->bios[0] != 0x55 || dev_priv->bios[1] != 0xaa) ++ goto free_bios; ++ ++ dev_priv->bios_header_start = radeon_bios16(dev_priv, 0x48); ++ ++ if (!dev_priv->bios_header_start) ++ goto free_bios; ++ ++ tmp = dev_priv->bios_header_start + 4; ++ if (!memcmp(dev_priv->bios + tmp, "ATOM", 4) || ++ !memcmp(dev_priv->bios + tmp, "MOTA", 4)) ++ dev_priv->is_atom_bios = true; ++ else ++ dev_priv->is_atom_bios = false; ++ ++ DRM_DEBUG("%sBIOS detected\n", dev_priv->is_atom_bios ? 
"ATOM" : "COM"); ++ return true; ++free_bios: ++ kfree(dev_priv->bios); ++ dev_priv->bios = NULL; ++ return false; ++} ++ ++int radeon_modeset_preinit(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ static struct card_info card; ++ int ret; ++ ++ card.dev = dev; ++ card.reg_read = cail_reg_read; ++ card.reg_write = cail_reg_write; ++ card.mc_read = cail_mc_read; ++ card.mc_write = cail_mc_write; ++ card.pll_read = cail_pll_read; ++ card.pll_write = cail_pll_write; ++ ++ ret = radeon_get_bios(dev); ++ if (!ret) ++ return -1; ++ ++ if (dev_priv->is_atom_bios) { ++ dev_priv->mode_info.atom_context = atom_parse(&card, dev_priv->bios); ++ radeon_atom_initialize_bios_scratch_regs(dev); ++ } else ++ radeon_combios_initialize_bios_scratch_regs(dev); ++ ++ radeon_get_clock_info(dev); ++ ++ return 0; ++} ++ ++int radeon_static_clocks_init(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (radeon_dynclks != -1) { ++ ++ if (dev_priv->chip_family == CHIP_RS400 || ++ dev_priv->chip_family == CHIP_RS480) ++ radeon_dynclks = 0; ++ ++ if ((dev_priv->flags & RADEON_IS_MOBILITY) && !radeon_is_avivo(dev_priv)) { ++ radeon_set_dynamic_clock(dev, radeon_dynclks); ++ } else if (radeon_is_avivo(dev_priv)) { ++ if (radeon_dynclks) { ++ radeon_atom_static_pwrmgt_setup(dev, 1); ++ radeon_atom_dyn_clk_setup(dev, 1); ++ } ++ } ++ } ++ if (radeon_is_r300(dev_priv) || radeon_is_rv100(dev_priv)) ++ radeon_force_some_clocks(dev); ++ return 0; ++} ++ ++static bool radeon_card_posted(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t reg; ++ ++ if (radeon_is_avivo(dev_priv)) { ++ reg = RADEON_READ(AVIVO_D1CRTC_CONTROL) | RADEON_READ(AVIVO_D2CRTC_CONTROL); ++ if (reg & AVIVO_CRTC_EN) ++ return true; ++ } else { ++ reg = RADEON_READ(RADEON_CRTC_GEN_CNTL) | RADEON_READ(RADEON_CRTC2_GEN_CNTL); ++ if (reg & RADEON_CRTC_EN) ++ return true; ++ } ++ return false; ++ ++} + int 
radeon_driver_load(struct drm_device *dev, unsigned long flags) + { + drm_radeon_private_t *dev_priv; +@@ -1722,6 +2781,8 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) + dev->dev_private = (void *)dev_priv; + dev_priv->flags = flags; + ++ dev_priv->chip_family = flags & RADEON_FAMILY_MASK; ++ + switch (flags & RADEON_FAMILY_MASK) { + case CHIP_R100: + case CHIP_RV200: +@@ -1742,6 +2803,18 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) + break; + } + ++ /* FEDORA HACKS - don't enable modesetting on pre-r300 ++ * until we have a mesa driver in place ++ */ ++ if (radeon_modeset == -1) { ++ if (dev_priv->chip_family <= CHIP_RV280) { ++ dev->driver->driver_features &= ~DRIVER_MODESET; ++ drm_put_minor(&dev->control); ++ radeon_modeset = 0; ++ } else ++ radeon_modeset = 1; ++ } ++ + if (drm_device_is_agp(dev)) + dev_priv->flags |= RADEON_IS_AGP; + else if (drm_device_is_pcie(dev)) +@@ -1749,9 +2822,34 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) + else + dev_priv->flags |= RADEON_IS_PCI; + ++ DRM_DEBUG("%s card detected\n", ++ ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? 
"PCIE" : "PCI")))); ++ ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ /* disable AGP for any chips after RV280 if not specified */ ++ if ((dev_priv->chip_family > CHIP_RV280) && (radeon_agpmode == 0)) ++ radeon_agpmode = -1; ++ ++ if (radeon_agpmode == -1) { ++ dev_priv->flags &= ~RADEON_IS_AGP; ++ if (dev_priv->chip_family > CHIP_RV515 || ++ dev_priv->chip_family == CHIP_RV380 || ++ dev_priv->chip_family == CHIP_RV410 || ++ dev_priv->chip_family == CHIP_R423) { ++ DRM_INFO("Forcing AGP to PCIE mode\n"); ++ dev_priv->flags |= RADEON_IS_PCIE; ++ } else { ++ DRM_INFO("Forcing AGP to PCI mode\n"); ++ dev_priv->flags |= RADEON_IS_PCI; ++ } ++ } ++ } ++ } ++ + ret = drm_addmap(dev, drm_get_resource_start(dev, 2), + drm_get_resource_len(dev, 2), _DRM_REGISTERS, +- _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); ++ _DRM_DRIVER | _DRM_READ_ONLY, &dev_priv->mmio); + if (ret != 0) + return ret; + +@@ -1761,8 +2859,63 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) + return ret; + } + +- DRM_DEBUG("%s card detected\n", +- ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ radeon_modeset_preinit(dev); ++ ++ radeon_get_vram_type(dev); ++ ++ dev_priv->pll_errata = 0; ++ ++ if (dev_priv->chip_family == CHIP_R300 && ++ (RADEON_READ(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) ++ dev_priv->pll_errata |= CHIP_ERRATA_R300_CG; ++ ++ if (dev_priv->chip_family == CHIP_RV200 || ++ dev_priv->chip_family == CHIP_RS200) ++ dev_priv->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; ++ ++ ++ if (dev_priv->chip_family == CHIP_RV100 || ++ dev_priv->chip_family == CHIP_RS100 || ++ dev_priv->chip_family == CHIP_RS200) ++ dev_priv->pll_errata |= CHIP_ERRATA_PLL_DELAY; ++ ++ /* check if cards are posted or not */ ++ if (!radeon_card_posted(dev) && dev_priv->bios) { ++ DRM_INFO("GPU not posted. 
posting now...\n"); ++ if (dev_priv->is_atom_bios) { ++ struct atom_context *ctx = dev_priv->mode_info.atom_context; ++ atom_asic_init(ctx); ++ } else { ++ radeon_combios_asic_init(dev); ++ } ++ } ++ ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ radeon_static_clocks_init(dev); ++ ++ /* init memory manager - start with all of VRAM and a 32MB GART aperture for now */ ++ dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); ++ ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ ++ ret = radeon_gem_mm_init(dev); ++ if (ret) ++ goto modeset_fail; ++ ++ radeon_modeset_init(dev); ++ ++ radeon_modeset_cp_init(dev); ++ dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL); ++ ++ drm_irq_install(dev); ++ } ++ ++ ++ return ret; ++modeset_fail: ++ dev->driver->driver_features &= ~DRIVER_MODESET; ++ drm_put_minor(&dev->control); + return ret; + } + +@@ -1816,18 +2969,12 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) + */ + int radeon_driver_firstopen(struct drm_device *dev) + { +- int ret; +- drm_local_map_t *map; + drm_radeon_private_t *dev_priv = dev->dev_private; + +- dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; + +- dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); +- ret = drm_addmap(dev, dev_priv->fb_aper_offset, +- drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, +- _DRM_WRITE_COMBINING, &map); +- if (ret != 0) +- return ret; ++ dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; + + return 0; + } +@@ -1836,6 +2983,14 @@ int radeon_driver_unload(struct drm_device *dev) + { + drm_radeon_private_t *dev_priv = dev->dev_private; + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ drm_irq_uninstall(dev); ++ radeon_modeset_cleanup(dev); ++ radeon_gem_mm_fini(dev); ++ } ++ ++ drm_rmmap(dev, dev_priv->mmio); ++ + DRM_DEBUG("\n"); + + drm_rmmap(dev, dev_priv->mmio); +@@ -1845,3 +3000,63 @@ int radeon_driver_unload(struct drm_device 
*dev) + dev->dev_private = NULL; + return 0; + } ++ ++void radeon_gart_flush(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t loop = 0, val; ++ if (dev_priv->flags & RADEON_IS_IGPGART) { ++ IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); ++ IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE); ++ val = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); ++ while ((val & RS480_GART_CACHE_INVALIDATE) && loop++ < 100000) { ++ val = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); ++ } ++ if (loop == 100000) ++ DRM_ERROR("Failed to invalidate IGP GART TLB\n"); ++ IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); ++ } else if (dev_priv->flags & RADEON_IS_PCIE) { ++ u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); ++ tmp |= RADEON_PCIE_TX_GART_INVALIDATE_TLB; ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); ++ tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); ++ tmp &= ~RADEON_PCIE_TX_GART_INVALIDATE_TLB; ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); ++ } else { ++ ++ ++ } ++ ++} ++ ++void radeon_commit_ring(drm_radeon_private_t *dev_priv) ++{ ++ int i; ++ u32 *ring; ++ int tail_aligned; ++ ++ /* check if the ring is padded out to 16-dword alignment */ ++ ++ tail_aligned = dev_priv->ring.tail & 0xf; ++ if (tail_aligned) { ++ int num_p2 = 16 - tail_aligned; ++ ++ ring = dev_priv->ring.start; ++ /* pad with some CP_PACKET2 */ ++ for (i = 0; i < num_p2; i++) ++ ring[dev_priv->ring.tail + i] = CP_PACKET2(); ++ ++ dev_priv->ring.tail += i; ++ ++ dev_priv->ring.space -= num_p2 * sizeof(u32); ++ } ++ ++ dev_priv->ring.tail &= dev_priv->ring.tail_mask; ++ ++ DRM_MEMORYBARRIER(); ++ GET_RING_HEAD( dev_priv ); ++ ++ RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); ++ /* read from PCI bus to ensure correct posting */ ++ RADEON_READ( RADEON_CP_RB_RPTR ); ++} +diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c +new file mode 100644 +index 
0000000..ddc7029 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_cs.c +@@ -0,0 +1,583 @@ ++/* ++ * Copyright 2008 Jerome Glisse. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Jerome Glisse ++ */ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++#include "r300_reg.h" ++ ++int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv) ++{ ++ struct drm_radeon_cs_parser parser; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct drm_radeon_cs *cs = data; ++ uint32_t cs_id; ++ struct drm_radeon_cs_chunk __user **chunk_ptr = NULL; ++ uint64_t *chunk_array; ++ uint64_t *chunk_array_ptr; ++ long size; ++ int r, i; ++ ++ /* set command stream id to 0 which is fake id */ ++ cs_id = 0; ++ cs->cs_id = cs_id; ++ ++ if (dev_priv == NULL) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ if (!cs->num_chunks) { ++ return 0; ++ } ++ ++ ++ chunk_array = drm_calloc(cs->num_chunks, sizeof(uint64_t), DRM_MEM_DRIVER); ++ if (!chunk_array) { ++ return -ENOMEM; ++ } ++ ++ chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); ++ ++ if (DRM_COPY_FROM_USER(chunk_array, chunk_array_ptr, sizeof(uint64_t)*cs->num_chunks)) { ++ r = -EFAULT; ++ goto out; ++ } ++ ++ parser.dev = dev; ++ parser.file_priv = fpriv; ++ parser.reloc_index = -1; ++ parser.ib_index = -1; ++ parser.num_chunks = cs->num_chunks; ++ /* copy out the chunk headers */ ++ parser.chunks = drm_calloc(parser.num_chunks, sizeof(struct drm_radeon_kernel_chunk), DRM_MEM_DRIVER); ++ if (!parser.chunks) { ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < parser.num_chunks; i++) { ++ struct drm_radeon_cs_chunk user_chunk; ++ ++ chunk_ptr = (void __user *)(unsigned long)chunk_array[i]; ++ ++ if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr, sizeof(struct drm_radeon_cs_chunk))){ ++ r = -EFAULT; ++ goto out; ++ } ++ parser.chunks[i].chunk_id = user_chunk.chunk_id; ++ ++ if (parser.chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ++ parser.reloc_index = i; ++ ++ if (parser.chunks[i].chunk_id == RADEON_CHUNK_ID_IB) ++ parser.ib_index = i; ++ ++ if (parser.chunks[i].chunk_id == RADEON_CHUNK_ID_OLD) { ++ 
parser.ib_index = i; ++ parser.reloc_index = -1; ++ } ++ ++ parser.chunks[i].length_dw = user_chunk.length_dw; ++ parser.chunks[i].chunk_data = (uint32_t *)(unsigned long)user_chunk.chunk_data; ++ ++ parser.chunks[i].kdata = NULL; ++ size = parser.chunks[i].length_dw * sizeof(uint32_t); ++ ++ switch(parser.chunks[i].chunk_id) { ++ case RADEON_CHUNK_ID_IB: ++ case RADEON_CHUNK_ID_OLD: ++ if (size == 0) { ++ r = -EINVAL; ++ goto out; ++ } ++ case RADEON_CHUNK_ID_RELOCS: ++ if (size) { ++ parser.chunks[i].kdata = drm_alloc(size, DRM_MEM_DRIVER); ++ if (!parser.chunks[i].kdata) { ++ r = -ENOMEM; ++ goto out; ++ } ++ ++ if (DRM_COPY_FROM_USER(parser.chunks[i].kdata, parser.chunks[i].chunk_data, size)) { ++ r = -EFAULT; ++ goto out; ++ } ++ } else ++ parser.chunks[i].kdata = NULL; ++ break; ++ default: ++ break; ++ } ++ DRM_DEBUG("chunk %d %d %d %p\n", i, parser.chunks[i].chunk_id, parser.chunks[i].length_dw, ++ parser.chunks[i].chunk_data); ++ } ++ ++ ++ if (parser.chunks[parser.ib_index].length_dw > (16 * 1024)) { ++ DRM_ERROR("cs->dwords too big: %d\n", parser.chunks[parser.ib_index].length_dw); ++ r = -EINVAL; ++ goto out; ++ } ++ ++ /* get ib */ ++ r = dev_priv->cs.ib_get(&parser); ++ if (r) { ++ DRM_ERROR("ib_get failed\n"); ++ goto out; ++ } ++ ++ ++ r = radeon_gem_prelocate(&parser); ++ if (r) { ++ goto out; ++ } ++ ++ /* now parse command stream */ ++ r = dev_priv->cs.parse(&parser); ++ if (r) { ++ goto out; ++ } ++ ++ /* emit cs id sequence */ ++ dev_priv->cs.id_emit(&parser, &cs_id); ++ ++ cs->cs_id = cs_id; ++ ++out: ++ dev_priv->cs.ib_free(&parser, r); ++ ++ for (i = 0; i < parser.num_chunks; i++) { ++ if (parser.chunks[i].kdata) ++ drm_free(parser.chunks[i].kdata, parser.chunks[i].length_dw * sizeof(uint32_t), DRM_MEM_DRIVER); ++ } ++ ++ drm_free(parser.chunks, sizeof(struct drm_radeon_kernel_chunk)*parser.num_chunks, DRM_MEM_DRIVER); ++ drm_free(chunk_array, sizeof(uint64_t)*parser.num_chunks, DRM_MEM_DRIVER); ++ ++ return r; ++} ++ ++/* for non-mm */ 
++static int radeon_nomm_relocate(struct drm_radeon_cs_parser *parser, uint32_t *reloc, uint32_t *offset) ++{ ++ *offset = reloc[1]; ++ return 0; ++} ++#define RELOC_SIZE 2 ++#define RELOC_SIZE_NEW 0 ++#define RADEON_2D_OFFSET_MASK 0x3fffff ++ ++static __inline__ int radeon_cs_relocate_packet0(struct drm_radeon_cs_parser *parser, uint32_t offset_dw) ++{ ++ struct drm_device *dev = parser->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t hdr, reg, val, packet3_hdr; ++ uint32_t tmp, offset; ++ struct drm_radeon_kernel_chunk *ib_chunk; ++ int ret; ++ ++ ib_chunk = &parser->chunks[parser->ib_index]; ++// if (parser->reloc_index == -1) ++// is_old = 1; ++ ++ hdr = ib_chunk->kdata[offset_dw]; ++ reg = (hdr & R300_CP_PACKET0_REG_MASK) << 2; ++ val = ib_chunk->kdata[offset_dw + 1]; ++ packet3_hdr = ib_chunk->kdata[offset_dw + 2]; ++ ++ /* this is too strict we may want to expand the length in the future and have ++ old kernels ignore it. */ ++ if (parser->reloc_index == -1) { ++ if (packet3_hdr != (RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE << 16))) { ++ DRM_ERROR("Packet 3 was %x should have been %x: reg is %x\n", packet3_hdr, RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE << 16), reg); ++ return -EINVAL; ++ } ++ } else { ++ if (packet3_hdr != (RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE_NEW << 16))) { ++ DRM_ERROR("Packet 3 was %x should have been %x: reg is %x\n", packet3_hdr, RADEON_CP_PACKET3 | RADEON_CP_NOP | (RELOC_SIZE_NEW << 16), reg); ++ return -EINVAL; ++ ++ } ++ } ++ ++ switch(reg) { ++ case RADEON_DST_PITCH_OFFSET: ++ case RADEON_SRC_PITCH_OFFSET: ++ /* pass in the start of the reloc */ ++ ret = dev_priv->cs.relocate(parser, ib_chunk->kdata + offset_dw + 2, &offset); ++ if (ret) ++ return ret; ++ tmp = (val & RADEON_2D_OFFSET_MASK) << 10; ++ val &= ~RADEON_2D_OFFSET_MASK; ++ offset += tmp; ++ offset >>= 10; ++ val |= offset; ++ break; ++ case RADEON_RB3D_COLOROFFSET: ++ case R300_RB3D_COLOROFFSET0: ++ case R300_RB3D_DEPTHOFFSET: 
++ case R200_PP_TXOFFSET_0: ++ case R200_PP_TXOFFSET_1: ++ case RADEON_PP_TXOFFSET_0: ++ case RADEON_PP_TXOFFSET_1: ++ case R300_TX_OFFSET_0: ++ case R300_TX_OFFSET_0+4: ++ ret = dev_priv->cs.relocate(parser, ib_chunk->kdata + offset_dw + 2, &offset); ++ if (ret) ++ return ret; ++ ++ offset &= 0xffffffe0; ++ val += offset; ++ break; ++ default: ++ break; ++ } ++ ++ ib_chunk->kdata[offset_dw + 1] = val; ++ return 0; ++} ++ ++static int radeon_cs_relocate_packet3(struct drm_radeon_cs_parser *parser, ++ uint32_t offset_dw) ++{ ++ drm_radeon_private_t *dev_priv = parser->dev->dev_private; ++ uint32_t hdr, num_dw, reg, i; ++ uint32_t offset, val, tmp, nptr, cptr; ++ uint32_t *reloc; ++ int ret; ++ struct drm_radeon_kernel_chunk *ib_chunk; ++ ++ ib_chunk = &parser->chunks[parser->ib_index]; ++// if (parser->reloc_index == -1) ++// is_old = 1; ++ ++ hdr = ib_chunk->kdata[offset_dw]; ++ num_dw = (hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16; ++ reg = hdr & 0xff00; ++ ++ switch(reg) { ++ case RADEON_CNTL_HOSTDATA_BLT: ++ { ++ val = ib_chunk->kdata[offset_dw + 2]; ++ ret = dev_priv->cs.relocate(parser, ib_chunk->kdata + offset_dw + num_dw + 2, &offset); ++ if (ret) ++ return ret; ++ ++ tmp = (val & RADEON_2D_OFFSET_MASK) << 10; ++ val &= ~RADEON_2D_OFFSET_MASK; ++ offset += tmp; ++ offset >>= 10; ++ val |= offset; ++ ++ ib_chunk->kdata[offset_dw + 2] = val; ++ } ++ case RADEON_3D_LOAD_VBPNTR: ++ nptr = ib_chunk->kdata[offset_dw + 1]; ++ cptr = offset_dw + 3; ++ for (i = 0; i < (nptr & ~1); i+= 2) { ++ reloc = ib_chunk->kdata + offset_dw + num_dw + 2; ++ reloc += ((i + 0) * 2); ++ ret = dev_priv->cs.relocate(parser, reloc, &offset); ++ if (ret) { ++ return ret; ++ } ++ ib_chunk->kdata[cptr] += offset; ++ cptr += 1; ++ reloc = ib_chunk->kdata + offset_dw + num_dw + 2; ++ reloc += ((i + 1) * 2); ++ ret = dev_priv->cs.relocate(parser, reloc, &offset); ++ if (ret) { ++ return ret; ++ } ++ ib_chunk->kdata[cptr] += offset; ++ cptr += 2; ++ } ++ if (nptr & 1) { ++ reloc = 
ib_chunk->kdata + offset_dw + num_dw + 2; ++ reloc += ((nptr - 1) * 2); ++ ret = dev_priv->cs.relocate(parser, reloc, &offset); ++ if (ret) { ++ return ret; ++ } ++ ib_chunk->kdata[cptr] += offset; ++ } ++ break; ++ case RADEON_CP_INDX_BUFFER: ++ reloc = ib_chunk->kdata + offset_dw + num_dw + 2; ++ ret = dev_priv->cs.relocate(parser, reloc, &offset); ++ if (ret) { ++ return ret; ++ } ++ ib_chunk->kdata[offset_dw + 2] += offset; ++ break; ++ default: ++ DRM_ERROR("reg is %x, not RADEON_CNTL_HOSTDATA_BLT\n", reg); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++int radeon_cs_packet0(struct drm_radeon_cs_parser *parser, uint32_t offset_dw) ++{ ++ uint32_t hdr, num_dw, reg; ++ int count_dw = 1; ++ int ret; ++ bool one_reg; ++ ++ hdr = parser->chunks[parser->ib_index].kdata[offset_dw]; ++ num_dw = ((hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16) + 2; ++ reg = (hdr & R300_CP_PACKET0_REG_MASK) << 2; ++ one_reg = !!(hdr & RADEON_ONE_REG_WR); ++ ++ while (count_dw < num_dw) { ++ /* need to have something like the r300 validation here - ++ list of allowed registers */ ++ int flags; ++ ++ ret = r300_check_range(reg, 1); ++ switch(ret) { ++ case -1: ++ DRM_ERROR("Illegal register %x\n", reg); ++ break; ++ case 0: ++ break; ++ case 1: ++ flags = r300_get_reg_flags(reg); ++ if (flags == MARK_CHECK_OFFSET) { ++ if (num_dw > 2) { ++ DRM_ERROR("Cannot relocate inside type stream of reg0 packets\n"); ++ return -EINVAL; ++ } ++ ++ ret = radeon_cs_relocate_packet0(parser, offset_dw); ++ if (ret) { ++ DRM_ERROR("failed to relocate packet\n"); ++ return ret; ++ } ++ DRM_DEBUG("need to relocate %x %d\n", reg, flags); ++ /* okay it should be followed by a NOP */ ++ } else if (flags == MARK_CHECK_SCISSOR) { ++ DRM_DEBUG("need to validate scissor %x %d\n", reg, flags); ++ } else { ++ DRM_ERROR("illegal register %x %d\n", reg, flags); ++ return -EINVAL; ++ } ++ break; ++ } ++ if (one_reg) ++ break; ++ ++ count_dw++; ++ reg += 4; ++ } ++ return 0; ++} ++ ++int radeon_cs_parse(struct 
drm_radeon_cs_parser *parser) ++{ ++ volatile int rb; ++ struct drm_radeon_kernel_chunk *ib_chunk; ++ /* scan the packet for various things */ ++ int count_dw = 0, size_dw; ++ int ret = 0; ++ ++ ib_chunk = &parser->chunks[parser->ib_index]; ++ size_dw = ib_chunk->length_dw; ++ ++ while (count_dw < size_dw && ret == 0) { ++ int hdr = ib_chunk->kdata[count_dw]; ++ int num_dw = (hdr & RADEON_CP_PACKET_COUNT_MASK) >> 16; ++ int reg; ++ ++ switch (hdr & RADEON_CP_PACKET_MASK) { ++ case RADEON_CP_PACKET0: ++ ret = radeon_cs_packet0(parser, count_dw); ++ break; ++ case RADEON_CP_PACKET1: ++ case RADEON_CP_PACKET2: ++ reg = hdr & RADEON_CP_PACKET0_REG_MASK; ++ DRM_DEBUG("Packet 1/2: %d %x\n", num_dw, reg); ++ break; ++ ++ case RADEON_CP_PACKET3: ++ reg = hdr & 0xff00; ++ ++ switch(reg) { ++ case RADEON_CNTL_HOSTDATA_BLT: ++ case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ ++ case RADEON_CP_INDX_BUFFER: ++ radeon_cs_relocate_packet3(parser, count_dw); ++ break; ++ ++ case RADEON_CNTL_BITBLT_MULTI: ++ DRM_ERROR("need relocate packet 3 for %x\n", reg); ++ break; ++ ++ case RADEON_3D_DRAW_IMMD: /* triggers drawing using in-packet vertex data */ ++ case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */ ++ case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */ ++ case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */ ++ case RADEON_WAIT_FOR_IDLE: ++ case RADEON_CP_NOP: ++ break; ++ default: ++ DRM_ERROR("unknown packet 3 %x\n", reg); ++ ret = -EINVAL; ++ } ++ break; ++ } ++ ++ count_dw += num_dw+2; ++ } ++ ++ if (ret) ++ return ret; ++ ++ ++ /* copy the packet into the IB */ ++ memcpy(parser->ib, ib_chunk->kdata, ib_chunk->length_dw * sizeof(uint32_t)); ++ ++ /* read back last byte to flush WC buffers */ ++ rb = readl((parser->ib + (ib_chunk->length_dw-1) * sizeof(uint32_t))); ++ ++ return 0; ++} ++ ++uint32_t radeon_cs_id_get(struct drm_radeon_private *radeon) ++{ ++ /* FIXME: 
protect with a spinlock */ ++ /* FIXME: check if wrap affect last reported wrap & sequence */ ++ radeon->cs.id_scnt = (radeon->cs.id_scnt + 1) & 0x00FFFFFF; ++ if (!radeon->cs.id_scnt) { ++ /* increment wrap counter */ ++ radeon->cs.id_wcnt += 0x01000000; ++ /* valid sequence counter start at 1 */ ++ radeon->cs.id_scnt = 1; ++ } ++ return (radeon->cs.id_scnt | radeon->cs.id_wcnt); ++} ++ ++void r100_cs_id_emit(struct drm_radeon_cs_parser *parser, uint32_t *id) ++{ ++ drm_radeon_private_t *dev_priv = parser->dev->dev_private; ++ RING_LOCALS; ++ ++ dev_priv->irq_emitted = radeon_update_breadcrumb(parser->dev); ++ /* ISYNC_CNTL should have CPSCRACTH bit set */ ++ *id = radeon_cs_id_get(dev_priv); ++ /* emit id in SCRATCH4 (not used yet in old drm) */ ++ BEGIN_RING(10); ++ OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1)); ++ OUT_RING(parser->card_offset); ++ OUT_RING(parser->chunks[parser->ib_index].length_dw); ++ OUT_RING(CP_PACKET2()); ++ OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG4, 0)); ++ OUT_RING(*id); ++ OUT_RING_REG(RADEON_LAST_SWI_REG, dev_priv->irq_emitted); ++ OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++} ++ ++void r300_cs_id_emit(struct drm_radeon_cs_parser *parser, uint32_t *id) ++{ ++ drm_radeon_private_t *dev_priv = parser->dev->dev_private; ++ int i; ++ RING_LOCALS; ++ ++ dev_priv->irq_emitted = radeon_update_breadcrumb(parser->dev); ++ ++ /* ISYNC_CNTL should not have CPSCRACTH bit set */ ++ *id = radeon_cs_id_get(dev_priv); ++ ++ /* emit id in SCRATCH6 */ ++ BEGIN_RING(16); ++ OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1)); ++ OUT_RING(parser->card_offset); ++ OUT_RING(parser->chunks[parser->ib_index].length_dw); ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(0); ++ for (i = 0; i < 11; i++) /* emit fillers like fglrx */ ++ OUT_RING(CP_PACKET2()); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ BEGIN_RING(16); ++ OUT_RING_REG(R300_RB3D_DSTCACHE_CTLSTAT, R300_RB3D_DC_FLUSH); ++ 
OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1)); ++ OUT_RING(6); ++ OUT_RING(*id); ++ OUT_RING_REG(R300_RB3D_DSTCACHE_CTLSTAT, R300_RB3D_DC_FINISH|R300_RB3D_DC_FLUSH); ++ /* emit inline breadcrumb for TTM fencing */ ++#if 1 ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ OUT_RING_REG(RADEON_LAST_SWI_REG, dev_priv->irq_emitted); ++#else ++ OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1)); ++ OUT_RING(3); /* breadcrumb register */ ++ OUT_RING(dev_priv->irq_emitted); ++ OUT_RING(CP_PACKET2()); ++#endif ++ OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE); ++ OUT_RING(CP_PACKET2()); ++ OUT_RING(CP_PACKET2()); ++ OUT_RING(CP_PACKET2()); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++} ++ ++uint32_t r100_cs_id_last_get(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ return GET_SCRATCH(4); ++} ++ ++uint32_t r300_cs_id_last_get(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ return GET_SCRATCH(6); ++} ++ ++int radeon_cs_init(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (dev_priv->chip_family < CHIP_RV280) { ++ dev_priv->cs.id_emit = r100_cs_id_emit; ++ dev_priv->cs.id_last_get = r100_cs_id_last_get; ++ } else if (dev_priv->chip_family < CHIP_R600) { ++ dev_priv->cs.id_emit = r300_cs_id_emit; ++ dev_priv->cs.id_last_get = r300_cs_id_last_get; ++ } ++ ++ dev_priv->cs.parse = radeon_cs_parse; ++ /* ib get depends on memory manager or not so memory manager */ ++ dev_priv->cs.relocate = radeon_nomm_relocate; ++ return 0; ++} +diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c +new file mode 100644 +index 0000000..fbd4143 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_cursor.c +@@ -0,0 +1,247 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++#define CURSOR_WIDTH 64 ++#define CURSOR_HEIGHT 64 ++ ++static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock) ++{ ++ struct drm_radeon_private *dev_priv = crtc->dev->dev_private; ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ uint32_t cur_lock; ++ ++ if (radeon_is_avivo(dev_priv)) { ++ cur_lock = RADEON_READ(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset); ++ if (lock) ++ cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK; ++ else ++ cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK; ++ RADEON_WRITE(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock); ++ } else { ++ switch(radeon_crtc->crtc_id) { ++ case 0: ++ cur_lock = RADEON_READ(RADEON_CUR_OFFSET); ++ if (lock) ++ cur_lock |= RADEON_CUR_LOCK; ++ else ++ cur_lock &= ~RADEON_CUR_LOCK; ++ RADEON_WRITE(RADEON_CUR_OFFSET, cur_lock); ++ break; ++ case 1: ++ cur_lock = RADEON_READ(RADEON_CUR2_OFFSET); ++ if (lock) ++ cur_lock |= RADEON_CUR2_LOCK; ++ else ++ cur_lock &= ~RADEON_CUR2_LOCK; ++ RADEON_WRITE(RADEON_CUR2_OFFSET, cur_lock); ++ break; ++ default: ++ break; ++ } ++ } ++} ++ ++static void radeon_hide_cursor(struct drm_crtc *crtc) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_radeon_private *dev_priv = crtc->dev->dev_private; ++ ++ if (radeon_is_avivo(dev_priv)) { ++ RADEON_WRITE(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); ++ RADEON_WRITE_P(RADEON_MM_DATA, 0, ~AVIVO_D1CURSOR_EN); ++ } else { ++ switch(radeon_crtc->crtc_id) { ++ case 0: ++ RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); ++ break; ++ case 1: ++ RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL); ++ break; ++ default: ++ return; ++ } ++ RADEON_WRITE_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN); ++ } ++} ++ ++static void radeon_show_cursor(struct drm_crtc *crtc) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_radeon_private 
*dev_priv = crtc->dev->dev_private; ++ ++ if (radeon_is_avivo(dev_priv)) { ++ RADEON_WRITE(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); ++ RADEON_WRITE(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | ++ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); ++ } else { ++ switch(radeon_crtc->crtc_id) { ++ case 0: ++ RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); ++ break; ++ case 1: ++ RADEON_WRITE(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL); ++ break; ++ default: ++ return; ++ } ++ ++ RADEON_WRITE_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN | ++ (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)), ++ ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK)); ++ } ++} ++ ++static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, ++ uint32_t width, uint32_t height) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_radeon_private *dev_priv = crtc->dev->dev_private; ++ struct drm_radeon_gem_object *obj_priv; ++ ++ obj_priv = obj->driver_private; ++ ++ if (radeon_is_avivo(dev_priv)) { ++ RADEON_WRITE(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, ++ dev_priv->fb_location + obj_priv->bo->offset); ++ RADEON_WRITE(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, ++ (width - 1) << 16 | (height - 1)); ++ } else { ++ switch(radeon_crtc->crtc_id) { ++ case 0: ++ /* offset is from DISP_BASE_ADDRESS */ ++ RADEON_WRITE(RADEON_CUR_OFFSET, obj_priv->bo->offset); ++ break; ++ case 1: ++ /* offset is from DISP2_BASE_ADDRESS */ ++ RADEON_WRITE(RADEON_CUR2_OFFSET, obj_priv->bo->offset); ++ break; ++ default: ++ break; ++ } ++ } ++} ++ ++int radeon_crtc_cursor_set(struct drm_crtc *crtc, ++ struct drm_file *file_priv, ++ uint32_t handle, ++ uint32_t width, ++ uint32_t height) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_gem_object *obj; ++ ++ if (!handle) { ++ /* turn off cursor */ ++ radeon_hide_cursor(crtc); ++ return 0; ++ } ++ ++ obj = drm_gem_object_lookup(crtc->dev, file_priv, 
handle); ++ if (!obj) { ++ DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); ++ return -EINVAL; ++ } ++ ++ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { ++ DRM_ERROR("bad cursor width or height %d x %d\n", width, height); ++ return -EINVAL; ++ } ++ ++ radeon_lock_cursor(crtc, true); ++ // XXX only 27 bit offset for legacy cursor ++ radeon_set_cursor(crtc, obj, width, height); ++ radeon_show_cursor(crtc); ++ radeon_lock_cursor(crtc, false); ++ ++ mutex_lock(&crtc->dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&crtc->dev->struct_mutex); ++ ++ return 0; ++} ++ ++int radeon_crtc_cursor_move(struct drm_crtc *crtc, ++ int x, int y) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_radeon_private *dev_priv = crtc->dev->dev_private; ++ int xorigin = 0, yorigin = 0; ++ ++ if (x < 0) ++ xorigin = -x + 1; ++ if (y < 0) ++ yorigin = -y + 1; ++ if (xorigin >= CURSOR_WIDTH) ++ xorigin = CURSOR_WIDTH - 1; ++ if (yorigin >= CURSOR_HEIGHT) ++ yorigin = CURSOR_HEIGHT - 1; ++ ++ radeon_lock_cursor(crtc, true); ++ if (radeon_is_avivo(dev_priv)) { ++ /* avivo cursor are offset into the total surface */ ++ x += crtc->x; ++ y += crtc->y; ++ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); ++ RADEON_WRITE(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, ++ ((xorigin ? 0: x) << 16) | ++ (yorigin ? 0 : y)); ++ RADEON_WRITE(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); ++ } else { ++ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ++ y /= 2; ++ else if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN) ++ y *= 2; ++ ++ switch(radeon_crtc->crtc_id) { ++ case 0: ++ RADEON_WRITE(RADEON_CUR_HORZ_VERT_OFF, (RADEON_CUR_LOCK ++ | (xorigin << 16) ++ | yorigin)); ++ RADEON_WRITE(RADEON_CUR_HORZ_VERT_POSN, (RADEON_CUR_LOCK ++ | ((xorigin ? 0 : x) << 16) ++ | (yorigin ? 
0 : y))); ++ break; ++ case 1: ++ RADEON_WRITE(RADEON_CUR2_HORZ_VERT_OFF, (RADEON_CUR2_LOCK ++ | (xorigin << 16) ++ | yorigin)); ++ RADEON_WRITE(RADEON_CUR2_HORZ_VERT_POSN, (RADEON_CUR2_LOCK ++ | ((xorigin ? 0 : x) << 16) ++ | (yorigin ? 0 : y))); ++ break; ++ default: ++ break; ++ } ++ ++ } ++ radeon_lock_cursor(crtc, false); ++ ++ return 0; ++} ++ +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c +new file mode 100644 +index 0000000..b44e5c2 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -0,0 +1,719 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++#include "atom.h" ++#include ++ ++#include "drm_crtc_helper.h" ++#include "drm_edid.h" ++ ++int radeon_ddc_dump(struct drm_connector *connector); ++ ++ ++ ++static void avivo_crtc_load_lut(struct drm_crtc *crtc) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int i; ++ ++ DRM_DEBUG("%d\n", radeon_crtc->crtc_id); ++ RADEON_WRITE(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0); ++ ++ RADEON_WRITE(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); ++ RADEON_WRITE(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); ++ RADEON_WRITE(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); ++ ++ RADEON_WRITE(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); ++ RADEON_WRITE(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); ++ RADEON_WRITE(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); ++ ++ RADEON_WRITE(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id); ++ RADEON_WRITE(AVIVO_DC_LUT_RW_MODE, 0); ++ RADEON_WRITE(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f); ++ ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE8(AVIVO_DC_LUT_RW_INDEX, i); ++ RADEON_WRITE(AVIVO_DC_LUT_30_COLOR, ++ (radeon_crtc->lut_r[i] << 22) | ++ (radeon_crtc->lut_g[i] << 12) | ++ (radeon_crtc->lut_b[i] << 2)); ++ } ++ ++ RADEON_WRITE(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); ++} ++ ++static void legacy_crtc_load_lut(struct drm_crtc *crtc) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int i; ++ uint32_t dac2_cntl; ++ ++ dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2); ++ if (radeon_crtc->crtc_id == 0) ++ dac2_cntl &= 
(uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL; ++ else ++ dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL; ++ RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); ++ ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE8(RADEON_PALETTE_INDEX, i); ++ RADEON_WRITE(RADEON_PALETTE_DATA, ++ (radeon_crtc->lut_r[i] << 16) | ++ (radeon_crtc->lut_g[i] << 8) | ++ (radeon_crtc->lut_b[i] << 0)); ++ } ++} ++ ++void radeon_crtc_load_lut(struct drm_crtc *crtc) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ ++ if (!crtc->enabled) ++ return; ++ ++ if (radeon_is_avivo(dev_priv)) ++ avivo_crtc_load_lut(crtc); ++ else ++ legacy_crtc_load_lut(crtc); ++} ++ ++/** Sets the color ramps on behalf of RandR */ ++void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, ++ u16 blue, int regno) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ ++ if (regno==0) ++ DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id); ++ radeon_crtc->lut_r[regno] = red >> 8; ++ radeon_crtc->lut_g[regno] = green >> 8; ++ radeon_crtc->lut_b[regno] = blue >> 8; ++} ++ ++static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, ++ u16 *blue, uint32_t size) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ int i, j; ++ ++ if (size != 256) ++ return; ++ ++ if (crtc->fb->depth == 16) { ++ for (i = 0; i < 64; i++) { ++ if (i <= 31) { ++ for (j = 0; j < 8; j++) { ++ radeon_crtc->lut_r[i * 8 + j] = red[i] >> 8; ++ radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 8; ++ } ++ } ++ for (j = 0; j < 4; j++) ++ radeon_crtc->lut_g[i * 4 + j] = green[i] >> 8; ++ } ++ } else { ++ for (i = 0; i < 256; i++) { ++ radeon_crtc->lut_r[i] = red[i] >> 8; ++ radeon_crtc->lut_g[i] = green[i] >> 8; ++ radeon_crtc->lut_b[i] = blue[i] >> 8; ++ } ++ } ++ ++ radeon_crtc_load_lut(crtc); ++} ++ ++static void radeon_crtc_destroy(struct drm_crtc *crtc) ++{ ++ struct radeon_crtc *radeon_crtc = 
to_radeon_crtc(crtc); ++ ++ drm_crtc_cleanup(crtc); ++ kfree(radeon_crtc); ++} ++ ++static const struct drm_crtc_funcs radeon_crtc_funcs = { ++ .cursor_set = radeon_crtc_cursor_set, ++ .cursor_move = radeon_crtc_cursor_move, ++ .gamma_set = radeon_crtc_gamma_set, ++ .set_config = drm_crtc_helper_set_config, ++ .destroy = radeon_crtc_destroy, ++}; ++ ++static void radeon_crtc_init(struct drm_device *dev, int index) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc; ++ int i; ++ ++ radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); ++ // radeon_crtc = kzalloc(sizeof(struct radeon_crtc), GFP_KERNEL); ++ if (radeon_crtc == NULL) ++ return; ++ ++ drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs); ++ ++ drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); ++ radeon_crtc->crtc_id = index; ++ ++ radeon_crtc->mode_set.crtc = &radeon_crtc->base; ++ radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); ++ radeon_crtc->mode_set.num_connectors = 0; ++ ++ for (i = 0; i < 256; i++) { ++ radeon_crtc->lut_r[i] = i; ++ radeon_crtc->lut_g[i] = i; ++ radeon_crtc->lut_b[i] = i; ++ } ++ ++ if (dev_priv->is_atom_bios && (radeon_is_avivo(dev_priv) || radeon_r4xx_atom)) ++ radeon_atombios_init_crtc(dev, radeon_crtc); ++ else ++ radeon_legacy_init_crtc(dev, radeon_crtc); ++} ++ ++bool radeon_legacy_setup_enc_conn(struct drm_device *dev) ++{ ++ ++ radeon_get_legacy_connector_info_from_bios(dev); ++ return false; ++} ++ ++bool radeon_setup_enc_conn(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ /* do all the mac and stuff */ ++ struct drm_connector *connector; ++ struct drm_encoder *encoder; ++ int i; ++ ++ if (dev_priv->is_atom_bios) ++ radeon_get_atom_connector_info_from_bios_connector_table(dev); ++ else ++ 
radeon_get_legacy_connector_info_from_bios(dev); ++ ++ for (i = 0; i < RADEON_MAX_BIOS_CONNECTOR; i++) { ++ if (!mode_info->bios_connector[i].valid) ++ continue; ++ ++ /* add a connector for this */ ++ if (mode_info->bios_connector[i].connector_type == CONNECTOR_NONE) ++ continue; ++ ++ connector = radeon_connector_add(dev, i); ++ if (!connector) ++ continue; ++ ++ encoder = NULL; ++ /* if we find an LVDS connector */ ++ if (mode_info->bios_connector[i].connector_type == CONNECTOR_LVDS) { ++ if (radeon_is_avivo(dev_priv) || radeon_r4xx_atom) ++ encoder = radeon_encoder_lvtma_add(dev, i); ++ else ++ encoder = radeon_encoder_legacy_lvds_add(dev, i); ++ if (encoder) ++ drm_mode_connector_attach_encoder(connector, encoder); ++ } ++ ++ /* DAC on DVI or VGA */ ++ if ((mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_I) || ++ (mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_A) || ++ (mode_info->bios_connector[i].connector_type == CONNECTOR_VGA)) { ++ if (radeon_is_avivo(dev_priv) || radeon_r4xx_atom) ++ encoder = radeon_encoder_atom_dac_add(dev, i, mode_info->bios_connector[i].dac_type, 0); ++ else { ++ if (mode_info->bios_connector[i].dac_type == DAC_PRIMARY) ++ encoder = radeon_encoder_legacy_primary_dac_add(dev, i, 0); ++ else if (mode_info->bios_connector[i].dac_type == DAC_TVDAC) ++ encoder = radeon_encoder_legacy_tv_dac_add(dev, i, 0); ++ } ++ if (encoder) ++ drm_mode_connector_attach_encoder(connector, encoder); ++ } ++ ++ /* TMDS on DVI */ ++ if ((mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_I) || ++ (mode_info->bios_connector[i].connector_type == CONNECTOR_DVI_D) || ++ (mode_info->bios_connector[i].connector_type == CONNECTOR_HDMI_TYPE_A) || ++ (mode_info->bios_connector[i].connector_type == CONNECTOR_HDMI_TYPE_B)) { ++ if (radeon_is_avivo(dev_priv) || radeon_r4xx_atom) ++ encoder = radeon_encoder_atom_tmds_add(dev, i, mode_info->bios_connector[i].tmds_type); ++ else { ++ if (mode_info->bios_connector[i].tmds_type == 
TMDS_INT) ++ encoder = radeon_encoder_legacy_tmds_int_add(dev, i); ++ else if (mode_info->bios_connector[i].tmds_type == TMDS_EXT) ++ encoder = radeon_encoder_legacy_tmds_ext_add(dev, i); ++ } ++ if (encoder) ++ drm_mode_connector_attach_encoder(connector, encoder); ++ } ++ ++ /* TVDAC on DIN */ ++ if (mode_info->bios_connector[i].connector_type == CONNECTOR_DIN) { ++ if (radeon_is_avivo(dev_priv) || radeon_r4xx_atom) ++ encoder = radeon_encoder_atom_dac_add(dev, i, mode_info->bios_connector[i].dac_type, 1); ++ else { ++ if (mode_info->bios_connector[i].dac_type == DAC_TVDAC) ++ encoder = radeon_encoder_legacy_tv_dac_add(dev, i, 0); ++ } ++ if (encoder) ++ drm_mode_connector_attach_encoder(connector, encoder); ++ } ++ } ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) ++ radeon_ddc_dump(connector); ++ return true; ++} ++ ++int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) ++{ ++ struct drm_radeon_private *dev_priv = radeon_connector->base.dev->dev_private; ++ struct edid *edid; ++ int ret = 0; ++ ++ if (!radeon_connector->ddc_bus) ++ return -1; ++ radeon_i2c_do_lock(radeon_connector, 1); ++ edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); ++ radeon_i2c_do_lock(radeon_connector, 0); ++ if (edid) { ++ /* update digital bits here */ ++ if (edid->digital) ++ radeon_connector->use_digital = 1; ++ else ++ radeon_connector->use_digital = 0; ++ drm_mode_connector_update_edid_property(&radeon_connector->base, edid); ++ ret = drm_add_edid_modes(&radeon_connector->base, edid); ++ kfree(edid); ++ return ret; ++ } ++ return -1; ++} ++ ++int radeon_ddc_dump(struct drm_connector *connector) ++{ ++ struct edid *edid; ++ struct radeon_connector *radeon_connector = to_radeon_connector(connector); ++ int ret = 0; ++ ++ if (!radeon_connector->ddc_bus) ++ return -1; ++ radeon_i2c_do_lock(radeon_connector, 1); ++ edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); ++ 
radeon_i2c_do_lock(radeon_connector, 0); ++ if (edid) { ++ kfree(edid); ++ } ++ return ret; ++} ++ ++static inline uint32_t radeon_div(uint64_t n, uint32_t d) ++{ ++ uint64_t x, y, result; ++ uint64_t mod; ++ ++ n += d / 2; ++ ++ mod = do_div(n, d); ++ return n; ++} ++ ++void radeon_compute_pll(struct radeon_pll *pll, ++ uint64_t freq, ++ uint32_t *dot_clock_p, ++ uint32_t *fb_div_p, ++ uint32_t *ref_div_p, ++ uint32_t *post_div_p, ++ int flags) ++{ ++ uint32_t min_ref_div = pll->min_ref_div; ++ uint32_t max_ref_div = pll->max_ref_div; ++ uint32_t best_vco = pll->best_vco; ++ uint32_t best_post_div = 1; ++ uint32_t best_ref_div = 1; ++ uint32_t best_feedback_div = 1; ++ uint32_t best_freq = -1; ++ uint32_t best_error = 0xffffffff; ++ uint32_t best_vco_diff = 1; ++ uint32_t post_div; ++ ++ DRM_DEBUG("PLL freq %llu %lu %lu\n", freq, pll->min_ref_div, pll->max_ref_div); ++ freq = freq * 1000; ++ ++ if (flags & RADEON_PLL_USE_REF_DIV) ++ min_ref_div = max_ref_div = pll->reference_div; ++ else { ++ while (min_ref_div < max_ref_div-1) { ++ uint32_t mid=(min_ref_div+max_ref_div)/2; ++ uint32_t pll_in = pll->reference_freq / mid; ++ if (pll_in < pll->pll_in_min) ++ max_ref_div = mid; ++ else if (pll_in > pll->pll_in_max) ++ min_ref_div = mid; ++ else ++ break; ++ } ++ } ++ ++ for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) { ++ uint32_t ref_div; ++ ++ if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) ++ continue; ++ ++ /* legacy radeons only have a few post_divs */ ++ if (flags & RADEON_PLL_LEGACY) { ++ if ((post_div == 5) || ++ (post_div == 7) || ++ (post_div == 9) || ++ (post_div == 10) || ++ (post_div == 11) || ++ (post_div == 13) || ++ (post_div == 14) || ++ (post_div == 15)) ++ continue; ++ } ++ ++ for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) { ++ uint32_t feedback_div, current_freq, error, vco_diff; ++ uint32_t pll_in = pll->reference_freq / ref_div; ++ uint32_t min_feed_div = pll->min_feedback_div; ++ 
uint32_t max_feed_div = pll->max_feedback_div+1; ++ ++ if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max) ++ continue; ++ ++ while (min_feed_div < max_feed_div) { ++ uint32_t vco; ++ feedback_div = (min_feed_div+max_feed_div)/2; ++ ++ vco = radeon_div((uint64_t)pll->reference_freq * feedback_div, ++ ref_div); ++ ++ if (vco < pll->pll_out_min) { ++ min_feed_div = feedback_div+1; ++ continue; ++ } else if(vco > pll->pll_out_max) { ++ max_feed_div = feedback_div; ++ continue; ++ } ++ ++ current_freq = radeon_div((uint64_t)pll->reference_freq * 10000 * feedback_div, ++ ref_div * post_div); ++ ++ error = abs(current_freq - freq); ++ vco_diff = abs(vco - best_vco); ++ ++ if ((best_vco == 0 && error < best_error) || ++ (best_vco != 0 && ++ (error < best_error - 100 || ++ (abs(error - best_error) < 100 && vco_diff < best_vco_diff )))) { ++ best_post_div = post_div; ++ best_ref_div = ref_div; ++ best_feedback_div = feedback_div; ++ best_freq = current_freq; ++ best_error = error; ++ best_vco_diff = vco_diff; ++ } else if (current_freq == freq) { ++ if (best_freq == -1) { ++ best_post_div = post_div; ++ best_ref_div = ref_div; ++ best_feedback_div = feedback_div; ++ best_freq = current_freq; ++ best_error = error; ++ best_vco_diff = vco_diff; ++ } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || ++ ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || ++ ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || ++ ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || ++ ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || ++ ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { ++ best_post_div = post_div; ++ best_ref_div = ref_div; ++ best_feedback_div = feedback_div; ++ best_freq = current_freq; ++ best_error = error; ++ best_vco_diff = vco_diff; ++ } ++ } ++ ++ if (current_freq < freq) ++ min_feed_div = 
feedback_div+1; ++ else ++ max_feed_div = feedback_div; ++ } ++ } ++ } ++ ++ *dot_clock_p = best_freq / 10000; ++ *fb_div_p = best_feedback_div; ++ *ref_div_p = best_ref_div; ++ *post_div_p = best_post_div; ++} ++ ++void radeon_get_clock_info(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct radeon_pll *p1pll = &dev_priv->mode_info.p1pll; ++ struct radeon_pll *p2pll = &dev_priv->mode_info.p2pll; ++ struct radeon_pll *spll = &dev_priv->mode_info.spll; ++ struct radeon_pll *mpll = &dev_priv->mode_info.mpll; ++ int ret; ++ ++ if (dev_priv->is_atom_bios) ++ ret = radeon_atom_get_clock_info(dev); ++ else ++ ret = radeon_combios_get_clock_info(dev); ++ ++ if (ret) { ++ if (p1pll->reference_div < 2) ++ p1pll->reference_div = 12; ++ if (p2pll->reference_div < 2) ++ p2pll->reference_div = 12; ++ } else { ++ // TODO FALLBACK ++ } ++ ++ /* pixel clocks */ ++ if (radeon_is_avivo(dev_priv)) { ++ p1pll->min_post_div = 2; ++ p1pll->max_post_div = 0x7f; ++ p2pll->min_post_div = 2; ++ p2pll->max_post_div = 0x7f; ++ } else { ++ p1pll->min_post_div = 1; ++ p1pll->max_post_div = 16; ++ p2pll->min_post_div = 1; ++ p2pll->max_post_div = 12; ++ } ++ ++ p1pll->min_ref_div = 2; ++ p1pll->max_ref_div = 0x3ff; ++ p1pll->min_feedback_div = 4; ++ p1pll->max_feedback_div = 0x7ff; ++ p1pll->best_vco = 0; ++ ++ p2pll->min_ref_div = 2; ++ p2pll->max_ref_div = 0x3ff; ++ p2pll->min_feedback_div = 4; ++ p2pll->max_feedback_div = 0x7ff; ++ p2pll->best_vco = 0; ++ ++ /* system clock */ ++ spll->min_post_div = 1; ++ spll->max_post_div = 1; ++ spll->min_ref_div = 2; ++ spll->max_ref_div = 0xff; ++ spll->min_feedback_div = 4; ++ spll->max_feedback_div = 0xff; ++ spll->best_vco = 0; ++ ++ /* memory clock */ ++ mpll->min_post_div = 1; ++ mpll->max_post_div = 1; ++ mpll->min_ref_div = 2; ++ mpll->max_ref_div = 0xff; ++ mpll->min_feedback_div = 4; ++ mpll->max_feedback_div = 0xff; ++ mpll->best_vco = 0; ++ ++} ++ ++/* not sure of the best place for these */ ++/* 10 
khz */ ++void radeon_legacy_set_engine_clock(struct drm_device *dev, int eng_clock) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct radeon_pll *spll = &mode_info->spll; ++ uint32_t ref_div, fb_div; ++ uint32_t m_spll_ref_fb_div; ++ ++ /* FIXME wait for idle */ ++ ++ m_spll_ref_fb_div = RADEON_READ_PLL(dev_priv, RADEON_M_SPLL_REF_FB_DIV); ++ m_spll_ref_fb_div &= ((RADEON_M_SPLL_REF_DIV_MASK << RADEON_M_SPLL_REF_DIV_SHIFT) | ++ (RADEON_MPLL_FB_DIV_MASK << RADEON_MPLL_FB_DIV_SHIFT)); ++ ref_div = m_spll_ref_fb_div & RADEON_M_SPLL_REF_DIV_MASK; ++ ++ fb_div = radeon_div(eng_clock * ref_div, spll->reference_freq); ++ m_spll_ref_fb_div |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT; ++ RADEON_WRITE_PLL(dev_priv, RADEON_M_SPLL_REF_FB_DIV, m_spll_ref_fb_div); ++ ++} ++ ++/* 10 khz */ ++void radeon_legacy_set_memory_clock(struct drm_device *dev, int mem_clock) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct radeon_pll *mpll = &mode_info->mpll; ++ uint32_t ref_div, fb_div; ++ uint32_t m_spll_ref_fb_div; ++ ++ /* FIXME wait for idle */ ++ ++ m_spll_ref_fb_div = RADEON_READ_PLL(dev_priv, RADEON_M_SPLL_REF_FB_DIV); ++ m_spll_ref_fb_div &= ((RADEON_M_SPLL_REF_DIV_MASK << RADEON_M_SPLL_REF_DIV_SHIFT) | ++ (RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT)); ++ ref_div = m_spll_ref_fb_div & RADEON_M_SPLL_REF_DIV_MASK; ++ ++ fb_div = radeon_div(mem_clock * ref_div, mpll->reference_freq); ++ m_spll_ref_fb_div |= (fb_div & RADEON_MPLL_FB_DIV_MASK) << RADEON_MPLL_FB_DIV_SHIFT; ++ RADEON_WRITE_PLL(dev_priv, RADEON_M_SPLL_REF_FB_DIV, m_spll_ref_fb_div); ++ ++} ++ ++static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) ++{ ++ struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); ++ struct drm_device *dev = fb->dev; ++ ++ if (fb->fbdev) ++ radeonfb_remove(dev, fb); 
++ ++ if (radeon_fb->obj) { ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(radeon_fb->obj); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ drm_framebuffer_cleanup(fb); ++ kfree(radeon_fb); ++} ++ ++static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb, ++ struct drm_file *file_priv, ++ unsigned int *handle) ++{ ++ struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); ++ ++ return drm_gem_handle_create(file_priv, radeon_fb->obj, handle); ++} ++ ++static const struct drm_framebuffer_funcs radeon_fb_funcs = { ++ .destroy = radeon_user_framebuffer_destroy, ++ .create_handle = radeon_user_framebuffer_create_handle, ++}; ++ ++struct drm_framebuffer * ++radeon_framebuffer_create(struct drm_device *dev, ++ struct drm_mode_fb_cmd *mode_cmd, ++ struct drm_gem_object *obj) ++{ ++ struct radeon_framebuffer *radeon_fb; ++ ++ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); ++ if (!radeon_fb) ++ return NULL; ++ ++ drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs); ++ drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd); ++ ++ radeon_fb->obj = obj; ++ ++ return &radeon_fb->base; ++} ++ ++static struct drm_framebuffer * ++radeon_user_framebuffer_create(struct drm_device *dev, ++ struct drm_file *file_priv, ++ struct drm_mode_fb_cmd *mode_cmd) ++{ ++ ++ struct radeon_framebuffer *radeon_fb; ++ struct drm_gem_object *obj; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); ++ ++ return radeon_framebuffer_create(dev, mode_cmd, obj); ++} ++ ++static const struct drm_mode_config_funcs radeon_mode_funcs = { ++ .fb_create = radeon_user_framebuffer_create, ++ .fb_changed = radeonfb_probe, ++}; ++ ++ ++int radeon_modeset_init(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ static struct card_info card; ++ size_t size; ++ int num_crtc = 2, i; ++ int ret; ++ ++ drm_mode_config_init(dev); ++ ++ dev->mode_config.funcs = (void *)&radeon_mode_funcs; ++ ++ if 
(radeon_is_avivo(dev_priv)) { ++ dev->mode_config.max_width = 8192; ++ dev->mode_config.max_height = 8192; ++ } else { ++ dev->mode_config.max_width = 4096; ++ dev->mode_config.max_height = 4096; ++ } ++ ++ dev->mode_config.fb_base = dev_priv->fb_aper_offset; ++ ++ /* allocate crtcs - TODO single crtc */ ++ for (i = 0; i < num_crtc; i++) { ++ radeon_crtc_init(dev, i); ++ } ++ ++ /* okay we should have all the bios connectors */ ++ ++ ret = radeon_setup_enc_conn(dev); ++ ++ if (!ret) ++ return ret; ++ ++ drm_helper_initial_config(dev, false); ++ ++ return 0; ++} ++ ++ ++int radeon_load_modeset_init(struct drm_device *dev) ++{ ++ int ret; ++ ret = radeon_modeset_init(dev); ++ ++ return ret; ++} ++ ++void radeon_modeset_cleanup(struct drm_device *dev) ++{ ++ drm_mode_config_cleanup(dev); ++} +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c +index fef2078..b7b4eb5 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.c ++++ b/drivers/gpu/drm/radeon/radeon_drv.c +@@ -35,53 +35,77 @@ + #include "radeon_drv.h" + + #include "drm_pciids.h" ++#include + + int radeon_no_wb; ++int radeon_dynclks = -1; ++int radeon_r4xx_atom = 0; ++int radeon_agpmode = 0; ++int radeon_vram_zero = 0; ++int radeon_gart_size = 512; /* default gart size */ + + MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); + module_param_named(no_wb, radeon_no_wb, int, 0444); + ++int radeon_modeset = -1; ++module_param_named(modeset, radeon_modeset, int, 0400); ++ ++MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks"); ++module_param_named(dynclks, radeon_dynclks, int, 0444); ++ ++MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx"); ++module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444); ++ ++MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)"); ++module_param_named(agpmode, radeon_agpmode, int, 0444); ++ ++MODULE_PARM_DESC(vramzero, "Zero VRAM for new objects"); ++module_param_named(vramzero, radeon_vram_zero, int, 0600); ++ 
++MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32,64, etc)\n"); ++module_param_named(gartsize, radeon_gart_size, int, 0600); ++ + static int dri_library_name(struct drm_device *dev, char *buf) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- int family = dev_priv->flags & RADEON_FAMILY_MASK; ++ int family; ++ ++ if (!dev_priv) ++ return 0; + ++ family = dev_priv->flags & RADEON_FAMILY_MASK; + return snprintf(buf, PAGE_SIZE, "%s\n", + (family < CHIP_R200) ? "radeon" : + ((family < CHIP_R300) ? "r200" : + "r300")); + } + +-static int radeon_suspend(struct drm_device *dev, pm_message_t state) +-{ +- drm_radeon_private_t *dev_priv = dev->dev_private; +- +- /* Disable *all* interrupts */ +- if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) +- RADEON_WRITE(R500_DxMODE_INT_MASK, 0); +- RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); +- return 0; +-} +- +-static int radeon_resume(struct drm_device *dev) +-{ +- drm_radeon_private_t *dev_priv = dev->dev_private; +- +- /* Restore interrupt registers */ +- if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) +- RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); +- RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); +- return 0; +-} +- + static struct pci_device_id pciidlist[] = { + radeon_PCI_IDS + }; + ++extern struct drm_fence_driver radeon_fence_driver; ++ ++static uint32_t radeon_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; ++static uint32_t radeon_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL}; ++ ++static struct drm_bo_driver radeon_bo_driver = { ++ .mem_type_prio = radeon_mem_prios, ++ .mem_busy_prio = radeon_busy_prios, ++ .num_mem_type_prio = sizeof(radeon_mem_prios)/sizeof(uint32_t), ++ .num_mem_busy_prio = sizeof(radeon_busy_prios)/sizeof(uint32_t), ++ .create_ttm_backend_entry = radeon_create_ttm_backend_entry, ++ .fence_type = radeon_fence_types, ++ .invalidate_caches = radeon_invalidate_caches, ++ 
.init_mem_type = radeon_init_mem_type, ++ .move = radeon_move, ++ .evict_flags = radeon_evict_flags, ++}; ++ + static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | +- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, ++ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM, + .dev_priv_size = sizeof(drm_radeon_buf_priv_t), + .load = radeon_driver_load, + .firstopen = radeon_driver_firstopen, +@@ -106,7 +130,13 @@ static struct drm_driver driver = { + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = radeon_ioctls, ++ .gem_init_object = radeon_gem_init_object, ++ .gem_free_object = radeon_gem_free_object, + .dma_ioctl = radeon_cp_buffers, ++ .master_create = radeon_master_create, ++ .master_destroy = radeon_master_destroy, ++ .proc_init = radeon_gem_proc_init, ++ .proc_cleanup = radeon_gem_proc_cleanup, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, +@@ -125,6 +155,9 @@ static struct drm_driver driver = { + .id_table = pciidlist, + }, + ++ .fence_driver = &radeon_fence_driver, ++ .bo_driver = &radeon_bo_driver, ++ + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, +@@ -136,6 +169,29 @@ static struct drm_driver driver = { + static int __init radeon_init(void) + { + driver.num_ioctls = radeon_max_ioctl; ++ ++ /* if enabled by default */ ++#if defined(CONFIG_DRM_RADEON_KMS) && defined(CONFIG_X86) ++ driver.driver_features |= DRIVER_MODESET; ++ if (radeon_modeset == 0) ++ driver.driver_features &= ~DRIVER_MODESET; ++#else ++ if (radeon_modeset == -1) ++ radeon_modeset = 0; ++#endif ++ ++ if (radeon_modeset == 1) ++ driver.driver_features |= DRIVER_MODESET; ++ ++ /* if the vga console setting is enabled still ++ * let modprobe override it */ ++#ifdef CONFIG_VGA_CONSOLE ++ if (vgacon_text_force() && radeon_modeset == -1) { ++ driver.driver_features &= ~DRIVER_MODESET; ++ radeon_modeset = 0; ++ } ++#endif ++ + return 
drm_init(&driver); + } + +diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h +index 490bc7c..c2f529b 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.h ++++ b/drivers/gpu/drm/radeon/radeon_drv.h +@@ -34,6 +34,8 @@ + /* General customization: + */ + ++#include "atom.h" ++ + #define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others." + + #define DRIVER_NAME "radeon" +@@ -126,6 +128,7 @@ enum radeon_family { + CHIP_RV410, + CHIP_RS400, + CHIP_RS480, ++ CHIP_RS600, + CHIP_RS690, + CHIP_RS740, + CHIP_RV515, +@@ -134,15 +137,18 @@ enum radeon_family { + CHIP_RV560, + CHIP_RV570, + CHIP_R580, ++ CHIP_R600, ++ CHIP_R630, ++ CHIP_RV610, ++ CHIP_RV630, ++ CHIP_RV670, ++ CHIP_RV620, ++ CHIP_RV635, ++ CHIP_RS780, ++ CHIP_RV770, + CHIP_LAST, + }; + +-enum radeon_cp_microcode_version { +- UCODE_R100, +- UCODE_R200, +- UCODE_R300, +-}; +- + /* + * Chip flags + */ +@@ -160,9 +166,42 @@ enum radeon_chip_flags { + RADEON_IS_IGPGART = 0x01000000UL, + }; + ++/* ++ * Errata workarounds ++ */ ++enum radeon_pll_errata { ++ CHIP_ERRATA_R300_CG = 0x00000001, ++ CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002, ++ CHIP_ERRATA_PLL_DELAY = 0x00000004 ++}; ++ ++enum radeon_ext_tmds_chip { ++ RADEON_DVOCHIP_NONE, ++ RADEON_SIL_164, ++ RADEON_SIL_1178 ++}; ++ ++#if defined(__powerpc__) ++enum radeon_mac_model { ++ RADEON_MAC_NONE, ++ RADEON_MAC_IBOOK, ++ RADEON_MAC_POWERBOOK_EXTERNAL, ++ RADEON_MAC_POWERBOOK_INTERNAL, ++ RADEON_MAC_POWERBOOK_VGA, ++ RADEON_MAC_MINI_EXTERNAL, ++ RADEON_MAC_MINI_INTERNAL, ++ RADEON_MAC_IMAC_G5_ISIGHT ++}; ++#endif ++ ++ + #define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \ +- DRM_READ32( (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR)) +-#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) ) ++ (dev_priv->mm.ring_read.bo ? 
readl(dev_priv->mm.ring_read.kmap.virtual + 0) : DRM_READ32((dev_priv)->ring_rptr, 0 )) : \ ++ RADEON_READ(RADEON_CP_RB_RPTR)) ++ ++#define SET_RING_HEAD(dev_priv,val) (dev_priv->mm.ring_read.bo ? \ ++ writel((val), dev_priv->mm.ring_read.kmap.virtual) : \ ++ DRM_WRITE32((dev_priv)->ring_rptr, 0, (val))) + + typedef struct drm_radeon_freelist { + unsigned int age; +@@ -180,7 +219,6 @@ typedef struct drm_radeon_ring_buffer { + int rptr_update; /* Double Words */ + int rptr_update_l2qw; /* log2 Quad Words */ + +- int fetch_size; /* Double Words */ + int fetch_size_l2ow; /* log2 Oct Words */ + + u32 tail; +@@ -226,18 +264,89 @@ struct radeon_virt_surface { + #define RADEON_FLUSH_EMITED (1 < 0) + #define RADEON_PURGE_EMITED (1 < 1) + ++struct radeon_mm_obj { ++ struct drm_buffer_object *bo; ++ struct drm_bo_kmap_obj kmap; ++}; ++ ++struct radeon_mm_info { ++ uint64_t vram_offset; // Offset into GPU space ++ uint64_t vram_size; ++ uint64_t vram_visible; ++ ++ uint64_t gart_start; ++ uint64_t gart_size; ++ ++ uint64_t gart_useable; ++ ++ void *pcie_table_backup; ++ ++ struct radeon_mm_obj pcie_table; ++ struct radeon_mm_obj ring; ++ struct radeon_mm_obj ring_read; ++ ++ struct radeon_mm_obj dma_bufs; ++ struct drm_map fake_agp_map; ++}; ++ ++#include "radeon_mode.h" ++ + struct drm_radeon_master_private { + drm_local_map_t *sarea; + drm_radeon_sarea_t *sarea_priv; + }; + ++struct drm_radeon_kernel_chunk { ++ uint32_t chunk_id; ++ uint32_t length_dw; ++ uint32_t __user *chunk_data; ++ uint32_t *kdata; ++}; ++ ++struct drm_radeon_cs_parser { ++ struct drm_device *dev; ++ struct drm_file *file_priv; ++ uint32_t num_chunks; ++ struct drm_radeon_kernel_chunk *chunks; ++ int ib_index; ++ int reloc_index; ++ uint32_t card_offset; ++ void *ib; ++}; ++ ++/* command submission struct */ ++struct drm_radeon_cs_priv { ++ uint32_t id_wcnt; ++ uint32_t id_scnt; ++ uint32_t id_last_wcnt; ++ uint32_t id_last_scnt; ++ ++ int (*parse)(struct drm_radeon_cs_parser *parser); ++ void 
(*id_emit)(struct drm_radeon_cs_parser *parser, uint32_t *id); ++ uint32_t (*id_last_get)(struct drm_device *dev); ++ /* this ib handling callback are for hidding memory manager drm ++ * from memory manager less drm, free have to emit ib discard ++ * sequence into the ring */ ++ int (*ib_get)(struct drm_radeon_cs_parser *parser); ++ uint32_t (*ib_get_ptr)(struct drm_device *dev, void *ib); ++ void (*ib_free)(struct drm_radeon_cs_parser *parser, int error); ++ /* do a relocation either MM or non-MM */ ++ int (*relocate)(struct drm_radeon_cs_parser *parser, ++ uint32_t *reloc, uint32_t *offset); ++}; ++ ++ ++ ++struct radeon_pm_regs { ++ uint32_t crtc_ext_cntl; ++ uint32_t bios_scratch[8]; ++}; ++ + typedef struct drm_radeon_private { + drm_radeon_ring_buffer_t ring; + +- u32 fb_location; +- u32 fb_size; +- int new_memmap; +- ++ bool new_memmap; ++ bool user_mm_enable; /* userspace enabled the memory manager */ + int gart_size; + u32 gart_vm_start; + unsigned long gart_buffers_offset; +@@ -253,8 +362,6 @@ typedef struct drm_radeon_private { + + int usec_timeout; + +- int microcode_version; +- + struct { + u32 boxes; + int freelist_timeouts; +@@ -290,7 +397,6 @@ typedef struct drm_radeon_private { + unsigned long buffers_offset; + unsigned long gart_textures_offset; + +- drm_local_map_t *sarea; + drm_local_map_t *cp_ring; + drm_local_map_t *ring_rptr; + drm_local_map_t *gart_textures; +@@ -299,8 +405,8 @@ typedef struct drm_radeon_private { + struct mem_block *fb_heap; + + /* SW interrupt */ ++ int counter; + wait_queue_head_t swi_queue; +- atomic_t swi_emitted; + int vblank_crtc; + uint32_t irq_enable_reg; + uint32_t r500_disp_irq_reg; +@@ -308,9 +414,6 @@ typedef struct drm_radeon_private { + struct radeon_surface surfaces[RADEON_MAX_SURFACES]; + struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES]; + +- unsigned long pcigart_offset; +- unsigned int pcigart_offset_set; +- struct drm_ati_pcigart_info gart_info; + + u32 scratch_ages[5]; + +@@ -320,7 
+423,44 @@ typedef struct drm_radeon_private { + + int num_gb_pipes; + int track_flush; ++ ++ bool mm_enabled; ++ struct radeon_mm_info mm; + drm_local_map_t *mmio; ++ ++ uint32_t chip_family; ++ ++ unsigned long pcigart_offset; ++ unsigned int pcigart_offset_set; ++ struct drm_ati_pcigart_info gart_info; ++ ++ struct radeon_mode_info mode_info; ++ ++ uint8_t *bios; /* copy of the BIOS image */ ++ bool is_atom_bios; ++ uint16_t bios_header_start; ++ u32 fb_location; ++ u32 fb_size; ++ bool is_ddr; ++ u32 ram_width; ++ ++ uint32_t mc_fb_location; ++ uint32_t mc_agp_loc_lo; ++ uint32_t mc_agp_loc_hi; ++ ++ enum radeon_pll_errata pll_errata; ++ ++ struct radeon_mm_obj **ib_objs; ++ /* ib bitmap */ ++ uint64_t ib_alloc_bitmap; // TO DO replace with a real bitmap ++ struct drm_radeon_cs_priv cs; ++ ++ struct radeon_pm_regs pmregs; ++ int irq_emitted; ++ atomic_t irq_received; ++ ++ uint32_t aper_size; ++ int vram_mtrr; + } drm_radeon_private_t; + + typedef struct drm_radeon_buf_priv { +@@ -335,8 +475,14 @@ typedef struct drm_radeon_kcmd_buffer { + } drm_radeon_kcmd_buffer_t; + + extern int radeon_no_wb; ++extern int radeon_dynclks; ++extern int radeon_r4xx_atom; + extern struct drm_ioctl_desc radeon_ioctls[]; + extern int radeon_max_ioctl; ++extern int radeon_agpmode; ++extern int radeon_modeset; ++extern int radeon_vram_zero; ++extern int radeon_gart_size; + + /* Check whether the given hardware address is inside the framebuffer or the + * GART area. 
+@@ -370,12 +516,9 @@ extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); + + extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n); + ++extern int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv); + extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv); + +-extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags); +-extern int radeon_presetup(struct drm_device *dev); +-extern int radeon_driver_postcleanup(struct drm_device *dev); +- + extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); + extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); + extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv); +@@ -403,13 +546,13 @@ extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value); + extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); + extern int radeon_driver_unload(struct drm_device *dev); + extern int radeon_driver_firstopen(struct drm_device *dev); +-extern void radeon_driver_preclose(struct drm_device *dev, ++extern void radeon_driver_preclose(struct drm_device * dev, + struct drm_file *file_priv); +-extern void radeon_driver_postclose(struct drm_device *dev, ++extern void radeon_driver_postclose(struct drm_device * dev, + struct drm_file *file_priv); + extern void radeon_driver_lastclose(struct drm_device * dev); +-extern int radeon_driver_open(struct drm_device *dev, +- struct drm_file *file_priv); ++extern int radeon_driver_open(struct drm_device * dev, ++ struct drm_file * file_priv); + extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + +@@ -423,6 +566,11 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + struct drm_file *file_priv, + drm_radeon_kcmd_buffer_t *cmdbuf); + ++extern int radeon_modeset_cp_suspend(struct drm_device *dev); ++extern int radeon_modeset_cp_resume(struct 
drm_device *dev); ++/* radeon_pm.c */ ++int radeon_suspend(struct drm_device *dev, pm_message_t state); ++int radeon_resume(struct drm_device *dev); + /* Flags for stats.boxes + */ + #define RADEON_BOX_DMA_IDLE 0x1 +@@ -431,10 +579,14 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + #define RADEON_BOX_WAIT_IDLE 0x8 + #define RADEON_BOX_TEXTURE_LOAD 0x10 + ++#define R600_CONFIG_MEMSIZE 0x5428 ++#define R600_CONFIG_APER_SIZE 0x5430 + /* Register definitions, register access macros and drmAddMap constants + * for Radeon kernel driver. + */ + ++#include "radeon_reg.h" ++ + #define RADEON_AGP_COMMAND 0x0f60 + #define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */ + # define RADEON_AGP_ENABLE (1<<8) +@@ -560,16 +712,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + #define R520_MC_IND_WR_EN (1 << 24) + #define R520_MC_IND_DATA 0x74 + +-#define RV515_MC_FB_LOCATION 0x01 +-#define RV515_MC_AGP_LOCATION 0x02 +-#define RV515_MC_AGP_BASE 0x03 +-#define RV515_MC_AGP_BASE_2 0x04 +- +-#define R520_MC_FB_LOCATION 0x04 +-#define R520_MC_AGP_LOCATION 0x05 +-#define R520_MC_AGP_BASE 0x06 +-#define R520_MC_AGP_BASE_2 0x07 +- + #define RADEON_MPP_TB_CONFIG 0x01c0 + #define RADEON_MEM_CNTL 0x0140 + #define RADEON_MEM_SDRAM_MODE_REG 0x0158 +@@ -634,14 +776,23 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + #define RADEON_SCRATCH_REG3 0x15ec + #define RADEON_SCRATCH_REG4 0x15f0 + #define RADEON_SCRATCH_REG5 0x15f4 ++#define RADEON_SCRATCH_REG6 0x15f8 + #define RADEON_SCRATCH_UMSK 0x0770 + #define RADEON_SCRATCH_ADDR 0x0774 + + #define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x)) + +-#define GET_SCRATCH( x ) (dev_priv->writeback_works \ +- ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \ +- : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) ++#define GET_SCRATCH( x ) (dev_priv->writeback_works ? \ ++ (dev_priv->mm.ring_read.bo ? 
\ ++ readl(dev_priv->mm.ring_read.kmap.virtual + RADEON_SCRATCHOFF(x)) : \ ++ DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(x))) : \ ++ RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x))) ++ ++#define RADEON_CRTC_CRNT_FRAME 0x0214 ++#define RADEON_CRTC2_CRNT_FRAME 0x0314 ++ ++#define RADEON_CRTC_STATUS 0x005c ++#define RADEON_CRTC2_STATUS 0x03fc + + #define RADEON_GEN_INT_CNTL 0x0040 + # define RADEON_CRTC_VBLANK_MASK (1 << 0) +@@ -660,10 +811,13 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + # define RADEON_SW_INT_FIRE (1 << 26) + # define R500_DISPLAY_INT_STATUS (1 << 0) + +-#define RADEON_HOST_PATH_CNTL 0x0130 +-# define RADEON_HDP_SOFT_RESET (1 << 26) +-# define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28) +-# define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28) ++#define RADEON_HOST_PATH_CNTL 0x0130 ++# define RADEON_HDP_APER_CNTL (1 << 23) ++# define RADEON_HP_LIN_RD_CACHE_DIS (1 << 24) ++# define RADEON_HDP_SOFT_RESET (1 << 26) ++# define RADEON_HDP_READ_BUFFER_INVALIDATED (1 << 27) ++ ++#define RADEON_NB_TOM 0x15c + + #define RADEON_ISYNC_CNTL 0x1724 + # define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0) +@@ -702,12 +856,17 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + #define RADEON_PP_TXFILTER_1 0x1c6c + #define RADEON_PP_TXFILTER_2 0x1c84 + +-#define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use R300_DSTCACHE_CTLSTAT */ +-#define R300_DSTCACHE_CTLSTAT 0x1714 +-# define R300_RB2D_DC_FLUSH (3 << 0) +-# define R300_RB2D_DC_FREE (3 << 2) +-# define R300_RB2D_DC_FLUSH_ALL 0xf +-# define R300_RB2D_DC_BUSY (1 << 31) ++#define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use R300_DSTCACHE_CTLSTAT */ ++#define R300_DSTCACHE_CTLSTAT 0x1714 ++# define R300_RB2D_DC_FLUSH (3 << 0) ++# define R300_RB2D_DC_FREE (3 << 2) ++//# define R300_RB2D_DC_FLUSH_ALL 0xf ++# define R300_RB2D_DC_BUSY (1 << 31) ++#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c ++# define RADEON_RB2D_DC_FLUSH (3 << 0) ++# define RADEON_RB2D_DC_FREE (3 << 2) ++# define RADEON_RB2D_DC_FLUSH_ALL 0xf ++# define 
RADEON_RB2D_DC_BUSY (1 << 31) + #define RADEON_RB3D_CNTL 0x1c3c + # define RADEON_ALPHA_BLEND_ENABLE (1 << 0) + # define RADEON_PLANE_MASK_ENABLE (1 << 1) +@@ -734,11 +893,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + # define R300_ZC_FLUSH (1 << 0) + # define R300_ZC_FREE (1 << 1) + # define R300_ZC_BUSY (1 << 31) +-#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c +-# define RADEON_RB3D_DC_FLUSH (3 << 0) +-# define RADEON_RB3D_DC_FREE (3 << 2) +-# define RADEON_RB3D_DC_FLUSH_ALL 0xf +-# define RADEON_RB3D_DC_BUSY (1 << 31) + #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c + # define R300_RB3D_DC_FLUSH (2 << 0) + # define R300_RB3D_DC_FREE (2 << 2) +@@ -746,15 +900,15 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c + # define RADEON_Z_TEST_MASK (7 << 4) + # define RADEON_Z_TEST_ALWAYS (7 << 4) +-# define RADEON_Z_HIERARCHY_ENABLE (1 << 8) ++# define RADEON_Z_HIERARCHY_ENABLE (1 << 8) + # define RADEON_STENCIL_TEST_ALWAYS (7 << 12) + # define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) + # define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) + # define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) +-# define RADEON_Z_COMPRESSION_ENABLE (1 << 28) +-# define RADEON_FORCE_Z_DIRTY (1 << 29) ++# define RADEON_Z_COMPRESSION_ENABLE (1 << 28) ++# define RADEON_FORCE_Z_DIRTY (1 << 29) + # define RADEON_Z_WRITE_ENABLE (1 << 30) +-# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31) ++# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31) + #define RADEON_RBBM_SOFT_RESET 0x00f0 + # define RADEON_SOFT_RESET_CP (1 << 0) + # define RADEON_SOFT_RESET_HI (1 << 1) +@@ -1015,27 +1169,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + #define RADEON_NUM_VERTICES_SHIFT 16 + + #define RADEON_COLOR_FORMAT_CI8 2 +-#define RADEON_COLOR_FORMAT_ARGB1555 3 +-#define RADEON_COLOR_FORMAT_RGB565 4 +-#define RADEON_COLOR_FORMAT_ARGB8888 6 +-#define RADEON_COLOR_FORMAT_RGB332 7 +-#define RADEON_COLOR_FORMAT_RGB8 9 +-#define RADEON_COLOR_FORMAT_ARGB4444 15 +- 
+-#define RADEON_TXFORMAT_I8 0 +-#define RADEON_TXFORMAT_AI88 1 +-#define RADEON_TXFORMAT_RGB332 2 +-#define RADEON_TXFORMAT_ARGB1555 3 +-#define RADEON_TXFORMAT_RGB565 4 +-#define RADEON_TXFORMAT_ARGB4444 5 +-#define RADEON_TXFORMAT_ARGB8888 6 +-#define RADEON_TXFORMAT_RGBA8888 7 +-#define RADEON_TXFORMAT_Y8 8 +-#define RADEON_TXFORMAT_VYUY422 10 +-#define RADEON_TXFORMAT_YVYU422 11 +-#define RADEON_TXFORMAT_DXT1 12 +-#define RADEON_TXFORMAT_DXT23 14 +-#define RADEON_TXFORMAT_DXT45 15 + + #define R200_PP_TXCBLEND_0 0x2f00 + #define R200_PP_TXCBLEND_1 0x2f10 +@@ -1146,16 +1279,44 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + + #define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4 + +-#define R200_PP_TRI_PERF 0x2cf8 ++#define R200_PP_TRI_PERF 0x2cf8 + + #define R200_PP_AFS_0 0x2f80 +-#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */ ++#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */ + + #define R200_VAP_PVS_CNTL_1 0x22D0 + + #define RADEON_CRTC_CRNT_FRAME 0x0214 + #define RADEON_CRTC2_CRNT_FRAME 0x0314 + ++/* MPEG settings from VHA code */ ++#define RADEON_VHA_SETTO16_1 0x2694 ++#define RADEON_VHA_SETTO16_2 0x2680 ++#define RADEON_VHA_SETTO0_1 0x1840 ++#define RADEON_VHA_FB_OFFSET 0x19e4 ++#define RADEON_VHA_SETTO1AND70S 0x19d8 ++#define RADEON_VHA_DST_PITCH 0x1408 ++ ++// set as reference header ++#define RADEON_VHA_BACKFRAME0_OFF_Y 0x1840 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y 0x1844 ++#define RADEON_VHA_BACKFRAME0_OFF_U 0x1848 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U 0x184c ++#define RADOEN_VHA_BACKFRAME0_OFF_V 0x1850 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V 0x1854 ++#define RADEON_VHA_FORWFRAME0_OFF_Y 0x1858 ++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_Y 0x185c ++#define RADEON_VHA_FORWFRAME0_OFF_U 0x1860 ++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_U 0x1864 ++#define RADEON_VHA_FORWFRAME0_OFF_V 0x1868 ++#define RADEON_VHA_FORWFRAME0_OFF_PITCH_V 0x1880 ++#define RADEON_VHA_BACKFRAME0_OFF_Y_2 0x1884 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y_2 
0x1888 ++#define RADEON_VHA_BACKFRAME0_OFF_U_2 0x188c ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U_2 0x1890 ++#define RADEON_VHA_BACKFRAME0_OFF_V_2 0x1894 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V_2 0x1898 ++ + #define R500_D1CRTC_STATUS 0x609c + #define R500_D2CRTC_STATUS 0x689c + #define R500_CRTC_V_BLANK (1<<0) +@@ -1196,19 +1357,36 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev, + #define RADEON_RING_HIGH_MARK 128 + + #define RADEON_PCIGART_TABLE_SIZE (32*1024) ++#define RADEON_DEFAULT_RING_SIZE (1024*1024) ++#define RADEON_DEFAULT_CP_TIMEOUT 100000 /* usecs */ + +-#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) +-#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) ++#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) ++#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) + #define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) + #define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) + +-#define RADEON_WRITE_PLL(addr, val) \ +-do { \ +- RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \ +- ((addr) & 0x1f) | RADEON_PLL_WR_EN ); \ +- RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \ ++extern u32 RADEON_READ_PLL(struct drm_radeon_private *dev_priv, int addr); ++extern void RADEON_WRITE_PLL(struct drm_radeon_private *dev_priv, int addr, uint32_t data); ++extern u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr); ++ ++#define RADEON_WRITE_P(reg, val, mask) \ ++do { \ ++ uint32_t tmp = RADEON_READ(reg); \ ++ tmp &= (mask); \ ++ tmp |= ((val) & ~(mask)); \ ++ RADEON_WRITE(reg, tmp); \ ++} while(0) ++ ++#define RADEON_WRITE_PLL_P(dev_priv, addr, val, mask) \ ++do { \ ++ uint32_t tmp_ = RADEON_READ_PLL(dev_priv, addr); \ ++ tmp_ &= (mask); \ ++ tmp_ |= ((val) & ~(mask)); \ ++ RADEON_WRITE_PLL(dev_priv, addr, tmp_); \ + } while (0) + ++ ++ + #define RADEON_WRITE_PCIE(addr, val) \ + do { \ + RADEON_WRITE8(RADEON_PCIE_INDEX, \ +@@ -1265,7 +1443,7 @@ do { \ + #define 
RADEON_WAIT_UNTIL_2D_IDLE() do { \ + OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ + OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \ +- RADEON_WAIT_HOST_IDLECLEAN) ); \ ++ RADEON_WAIT_HOST_IDLECLEAN | RADEON_WAIT_DMA_GUI_IDLE) ); \ + } while (0) + + #define RADEON_WAIT_UNTIL_3D_IDLE() do { \ +@@ -1374,15 +1552,16 @@ do { \ + + #define RADEON_VERBOSE 0 + +-#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring; ++#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring; + + #define BEGIN_RING( n ) do { \ + if ( RADEON_VERBOSE ) { \ + DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ + } \ +- if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ ++ _align_nr = (n + 0xf) & ~0xf; \ ++ if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \ + COMMIT_RING(); \ +- radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \ ++ radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \ + } \ + _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ + ring = dev_priv->ring.start; \ +@@ -1399,19 +1578,14 @@ do { \ + DRM_ERROR( \ + "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ + ((dev_priv->ring.tail + _nr) & mask), \ +- write, __LINE__); \ ++ write, __LINE__); \ + } else \ + dev_priv->ring.tail = write; \ + } while (0) + + #define COMMIT_RING() do { \ +- /* Flush writes to ring */ \ +- DRM_MEMORYBARRIER(); \ +- GET_RING_HEAD( dev_priv ); \ +- RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \ +- /* read from PCI bus to ensure correct posting */ \ +- RADEON_READ( RADEON_CP_RB_RPTR ); \ +-} while (0) ++ radeon_commit_ring(dev_priv); \ ++ } while(0) + + #define OUT_RING( x ) do { \ + if ( RADEON_VERBOSE ) { \ +@@ -1450,4 +1624,150 @@ do { \ + write &= mask; \ + } while (0) + ++/* radeon GEM->TTM munger */ ++struct drm_radeon_gem_object { ++ /* wrap a TTM bo */ ++ struct drm_buffer_object *bo; ++ struct drm_fence_object *fence; ++ struct drm_gem_object *obj; ++ ++}; ++ ++extern int radeon_gem_info_ioctl(struct drm_device *dev, void *data, ++ struct drm_file 
*file_priv); ++ ++extern int radeon_gem_create_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++extern int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++extern int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++extern void radeon_fence_handler(struct drm_device *dev); ++extern int radeon_fence_emit_sequence(struct drm_device *dev, uint32_t class, ++ uint32_t flags, uint32_t *sequence, ++ uint32_t *native_type); ++extern void radeon_poke_flush(struct drm_device *dev, uint32_t class); ++extern int radeon_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags); ++ ++/* radeon_buffer.c */ ++extern struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device *dev); ++extern int radeon_fence_types(struct drm_buffer_object *bo, uint32_t *class, uint32_t *type); ++extern int radeon_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); ++extern int radeon_init_mem_type(struct drm_device * dev, uint32_t type, ++ struct drm_mem_type_manager * man); ++extern int radeon_move(struct drm_buffer_object * bo, ++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem); ++ ++extern void radeon_gart_flush(struct drm_device *dev); ++extern uint64_t radeon_evict_flags(struct drm_buffer_object *bo); ++ ++#define BREADCRUMB_BITS 31 ++#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1) ++ ++/* Breadcrumb - swi irq */ ++#define READ_BREADCRUMB(dev_priv) GET_SCRATCH(3) ++ ++static inline int radeon_update_breadcrumb(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct drm_radeon_master_private *master_priv; ++ ++ ++dev_priv->counter; ++ if (dev_priv->counter > BREADCRUMB_MASK) ++ dev_priv->counter = 1; ++ ++ if (dev->primary->master) { ++ master_priv = 
dev->primary->master->driver_priv; ++ ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->last_fence = dev_priv->counter; ++ } ++ return dev_priv->counter; ++} ++ ++#define radeon_is_avivo(dev_priv) ((dev_priv->chip_family >= CHIP_RS600)) ++ ++#define radeon_is_dce3(dev_priv) ((dev_priv->chip_family >= CHIP_RV620)) ++ ++#define radeon_is_rv100(dev_priv) ((dev_priv->chip_family == CHIP_RV100) || \ ++ (dev_priv->chip_family == CHIP_RV200) || \ ++ (dev_priv->chip_family == CHIP_RS100) || \ ++ (dev_priv->chip_family == CHIP_RS200) || \ ++ (dev_priv->chip_family == CHIP_RV250) || \ ++ (dev_priv->chip_family == CHIP_RV280) || \ ++ (dev_priv->chip_family == CHIP_RS300)) ++ ++#define radeon_is_r300(dev_priv) ((dev_priv->chip_family == CHIP_R300) || \ ++ (dev_priv->chip_family == CHIP_RV350) || \ ++ (dev_priv->chip_family == CHIP_R350) || \ ++ (dev_priv->chip_family == CHIP_RV380) || \ ++ (dev_priv->chip_family == CHIP_R420) || \ ++ (dev_priv->chip_family == CHIP_R423) || \ ++ (dev_priv->chip_family == CHIP_RV410) || \ ++ (dev_priv->chip_family == CHIP_RS400) || \ ++ (dev_priv->chip_family == CHIP_RS480)) ++ ++#define radeon_bios8(dev_priv, v) (dev_priv->bios[v]) ++#define radeon_bios16(dev_priv, v) (dev_priv->bios[v] | (dev_priv->bios[(v) + 1] << 8)) ++#define radeon_bios32(dev_priv, v) ((dev_priv->bios[v]) | \ ++ (dev_priv->bios[(v) + 1] << 8) | \ ++ (dev_priv->bios[(v) + 2] << 16) | \ ++ (dev_priv->bios[(v) + 3] << 24)) ++ ++extern void radeon_pll_errata_after_index(struct drm_radeon_private *dev_priv); ++extern int radeon_emit_irq(struct drm_device * dev); ++ ++extern void radeon_gem_free_object(struct drm_gem_object *obj); ++extern int radeon_gem_init_object(struct drm_gem_object *obj); ++extern int radeon_gem_mm_init(struct drm_device *dev); ++extern void radeon_gem_mm_fini(struct drm_device *dev); ++extern int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int radeon_gem_unpin_ioctl(struct drm_device *dev, 
void *data, ++ struct drm_file *file_priv); ++int radeon_gem_object_pin(struct drm_gem_object *obj, ++ uint32_t alignment, uint32_t pin_domain); ++int radeon_gem_object_unpin(struct drm_gem_object *obj); ++int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int radeon_gem_wait_rendering(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++struct drm_gem_object *radeon_gem_object_alloc(struct drm_device *dev, int size, int alignment, ++ int initial_domain, bool discardable); ++int radeon_modeset_init(struct drm_device *dev); ++void radeon_modeset_cleanup(struct drm_device *dev); ++extern u32 radeon_read_mc_reg(drm_radeon_private_t *dev_priv, int addr); ++extern void radeon_write_mc_reg(drm_radeon_private_t *dev_priv, u32 addr, u32 val); ++void radeon_read_agp_location(drm_radeon_private_t *dev_priv, u32 *agp_lo, u32 *agp_hi); ++void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc); ++extern void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on); ++#define RADEONFB_CONN_LIMIT 4 ++ ++extern int radeon_master_create(struct drm_device *dev, struct drm_master *master); ++extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master); ++extern void radeon_cp_dispatch_flip(struct drm_device * dev, struct drm_master *master); ++extern int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv); ++extern int radeon_cs_init(struct drm_device *dev); ++void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master); ++void radeon_init_memory_map(struct drm_device *dev); ++void radeon_enable_bm(struct drm_radeon_private *dev_priv); ++ ++extern int radeon_gem_proc_init(struct drm_minor *minor); ++extern void radeon_gem_proc_cleanup(struct drm_minor *minor); ++#define MARK_SAFE 1 ++#define MARK_CHECK_OFFSET 2 ++#define MARK_CHECK_SCISSOR 3 ++ ++extern void radeon_commit_ring(drm_radeon_private_t *dev_priv); ++ ++extern int 
r300_check_range(unsigned reg, int count); ++extern int r300_get_reg_flags(unsigned reg); ++int radeon_gem_prelocate(struct drm_radeon_cs_parser *parser); ++void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc, u32 agp_loc_hi); ++void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base); + #endif /* __RADEON_DRV_H__ */ +diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c +new file mode 100644 +index 0000000..7800035 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_encoders.c +@@ -0,0 +1,1107 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "drm_crtc_helper.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++extern int atom_debug; ++ ++void radeon_rmx_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ ++ if (mode->hdisplay < radeon_encoder->panel_xres || ++ mode->vdisplay < radeon_encoder->panel_yres) { ++ radeon_encoder->flags |= RADEON_USE_RMX; ++ if (radeon_is_avivo(dev_priv)) { ++ adjusted_mode->hdisplay = radeon_encoder->panel_xres; ++ adjusted_mode->vdisplay = radeon_encoder->panel_yres; ++ adjusted_mode->htotal = radeon_encoder->panel_xres + radeon_encoder->hblank; ++ adjusted_mode->hsync_start = radeon_encoder->panel_xres + radeon_encoder->hoverplus; ++ adjusted_mode->hsync_end = adjusted_mode->hsync_start + radeon_encoder->hsync_width; ++ adjusted_mode->vtotal = radeon_encoder->panel_yres + radeon_encoder->vblank; ++ adjusted_mode->vsync_start = radeon_encoder->panel_yres + radeon_encoder->voverplus; ++ adjusted_mode->vsync_end = adjusted_mode->vsync_start + radeon_encoder->vsync_width; ++ /* update crtc values */ ++ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); ++ /* adjust crtc values */ ++ adjusted_mode->crtc_hdisplay = radeon_encoder->panel_xres; ++ adjusted_mode->crtc_vdisplay = radeon_encoder->panel_yres; ++ adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + radeon_encoder->hblank; ++ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + radeon_encoder->hoverplus; ++ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + radeon_encoder->hsync_width; ++ adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + radeon_encoder->vblank; ++ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 
radeon_encoder->voverplus; ++ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + radeon_encoder->vsync_width; ++ } else { ++ adjusted_mode->htotal = radeon_encoder->panel_xres + radeon_encoder->hblank; ++ adjusted_mode->hsync_start = radeon_encoder->panel_xres + radeon_encoder->hoverplus; ++ adjusted_mode->hsync_end = adjusted_mode->hsync_start + radeon_encoder->hsync_width; ++ adjusted_mode->vtotal = radeon_encoder->panel_yres + radeon_encoder->vblank; ++ adjusted_mode->vsync_start = radeon_encoder->panel_yres + radeon_encoder->voverplus; ++ adjusted_mode->vsync_end = adjusted_mode->vsync_start + radeon_encoder->vsync_width; ++ /* update crtc values */ ++ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); ++ /* adjust crtc values */ ++ adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + radeon_encoder->hblank; ++ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + radeon_encoder->hoverplus; ++ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + radeon_encoder->hsync_width; ++ adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + radeon_encoder->vblank; ++ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + radeon_encoder->voverplus; ++ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + radeon_encoder->vsync_width; ++ } ++ } ++} ++ ++ ++static int atom_dac_find_atom_type(struct radeon_encoder *radeon_encoder, struct drm_connector *connector) ++{ ++ struct drm_device *dev = radeon_encoder->base.dev; ++ struct drm_connector *connector_find; ++ int atom_type = -1; ++ ++ if (!connector) { ++ list_for_each_entry(connector_find, &dev->mode_config.connector_list, head) { ++ if (connector_find->encoder == &radeon_encoder->base) ++ connector = connector_find; ++ } ++ } ++ if (connector) { ++ /* look for the encoder in the connector list - ++ check if we the DAC is enabled on a VGA or STV/CTV or CV connector */ ++ /* work out the ATOM_DEVICE bits */ ++ switch 
(connector->connector_type) { ++ case CONNECTOR_VGA: ++ case CONNECTOR_DVI_I: ++ case CONNECTOR_DVI_A: ++ if (radeon_encoder->atom_device & ATOM_DEVICE_CRT1_SUPPORT) ++ atom_type = ATOM_DEVICE_CRT1_INDEX; ++ else if (radeon_encoder->atom_device & ATOM_DEVICE_CRT2_SUPPORT) ++ atom_type = ATOM_DEVICE_CRT2_INDEX; ++ break; ++ case CONNECTOR_STV: ++ case CONNECTOR_CTV: ++ if (radeon_encoder->atom_device & ATOM_DEVICE_TV1_SUPPORT) ++ atom_type = ATOM_DEVICE_TV1_INDEX; ++ break; ++ case CONNECTOR_DIN: ++ if (radeon_encoder->atom_device & ATOM_DEVICE_TV1_SUPPORT) ++ atom_type = ATOM_DEVICE_TV1_INDEX; ++ if (radeon_encoder->atom_device & ATOM_DEVICE_CV_SUPPORT) ++ atom_type = ATOM_DEVICE_CV_INDEX; ++ break; ++ } ++ } ++ ++ return atom_type; ++} ++ ++/* LVTMA encoder for LVDS usage */ ++static void atombios_display_device_control(struct drm_encoder *encoder, int index, uint8_t state) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; ++ ++ memset(&args, 0, sizeof(args)); ++ args.ucAction = state; ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++static void atombios_scaler_setup(struct drm_encoder *encoder, struct drm_display_mode *mode) ++{ ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ++ ENABLE_SCALER_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); ++ ++ /* pre-avivo chips only have 1 scaler */ ++ if (!radeon_is_avivo(dev_priv) && radeon_crtc->crtc_id) ++ return; ++ ++ memset(&args, 0, sizeof(args)); ++ args.ucScaler = radeon_crtc->crtc_id; ++ ++ if (radeon_encoder->flags & RADEON_USE_RMX) { ++ if (radeon_encoder->rmx_type == RMX_FULL) ++ args.ucEnable = ATOM_SCALER_EXPANSION; ++ else if 
(radeon_encoder->rmx_type == RMX_CENTER) ++ args.ucEnable = ATOM_SCALER_CENTER; ++ } else { ++ if (radeon_is_avivo(dev_priv)) ++ args.ucEnable = ATOM_SCALER_DISABLE; ++ else ++ args.ucEnable = ATOM_SCALER_CENTER; ++ } ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++void atombios_set_crtc_source(struct drm_encoder *encoder, int source) ++{ ++ int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ++ struct drm_radeon_private *dev_priv = encoder->dev->dev_private; ++ uint8_t frev, crev; ++ SELECT_CRTC_SOURCE_PS_ALLOCATION crtc_src_param; ++ SELECT_CRTC_SOURCE_PARAMETERS_V2 crtc_src_param2; ++ uint32_t *param = NULL; ++ ++ atom_parse_cmd_header(dev_priv->mode_info.atom_context, index, &frev, &crev); ++ switch (frev) { ++ case 1: { ++ switch (crev) { ++ case 0: ++ case 1: ++ default: ++ memset(&crtc_src_param, 0, sizeof(crtc_src_param)); ++ crtc_src_param.ucCRTC = radeon_crtc->crtc_id; ++ crtc_src_param.ucDevice = source; ++ param = (uint32_t *)&crtc_src_param; ++ break; ++ case 2: ++ memset(&crtc_src_param2, 0, sizeof(crtc_src_param2)); ++ crtc_src_param2.ucCRTC = radeon_crtc->crtc_id; ++ crtc_src_param2.ucEncoderID = source; ++ switch (source) { ++ case ATOM_DEVICE_CRT1_INDEX: ++ case ATOM_DEVICE_CRT2_INDEX: ++ crtc_src_param2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; ++ break; ++ case ATOM_DEVICE_DFP1_INDEX: ++ case ATOM_DEVICE_DFP2_INDEX: ++ case ATOM_DEVICE_DFP3_INDEX: ++ crtc_src_param2.ucEncodeMode = ATOM_ENCODER_MODE_DVI; ++ // TODO ENCODER MODE ++ break; ++ case ATOM_DEVICE_LCD1_INDEX: ++ crtc_src_param2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; ++ break; ++ case ATOM_DEVICE_TV1_INDEX: ++ crtc_src_param2.ucEncodeMode = ATOM_ENCODER_MODE_TV; ++ break; ++ case ATOM_DEVICE_CV_INDEX: ++ crtc_src_param2.ucEncodeMode = ATOM_ENCODER_MODE_CV; ++ break; ++ } ++ param = (uint32_t *)&crtc_src_param2; ++ break; ++ } ++ } ++ break; ++ default: ++ return; ++ 
} ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)param); ++ ++} ++ ++static void radeon_dfp_disable_dither(struct drm_encoder *encoder, int device) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ ++ if (!radeon_is_avivo(dev_priv)) ++ return; ++ ++ switch (device) { ++ case ATOM_DEVICE_DFP1_INDEX: ++ RADEON_WRITE(AVIVO_TMDSA_BIT_DEPTH_CONTROL, 0); /* TMDSA */ ++ break; ++ case ATOM_DEVICE_DFP2_INDEX: ++ if ((dev_priv->chip_family == CHIP_RS600) || ++ (dev_priv->chip_family == CHIP_RS690) || ++ (dev_priv->chip_family == CHIP_RS740)) ++ RADEON_WRITE(AVIVO_DDIA_BIT_DEPTH_CONTROL, 0); /* DDIA */ ++ else ++ RADEON_WRITE(AVIVO_DVOA_BIT_DEPTH_CONTROL, 0); /* DVO */ ++ break; ++ /*case ATOM_DEVICE_LCD1_INDEX:*/ /* LVDS panels need dither enabled */ ++ case ATOM_DEVICE_DFP3_INDEX: ++ RADEON_WRITE(AVIVO_LVTMA_BIT_DEPTH_CONTROL, 0); /* LVTMA */ ++ break; ++ default: ++ break; ++ } ++} ++ ++ ++static void radeon_lvtma_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ LVDS_ENCODER_CONTROL_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); ++ ++ memset(&args, 0, sizeof(args)); ++ atombios_scaler_setup(encoder, mode); ++ atombios_set_crtc_source(encoder, ATOM_DEVICE_LCD1_INDEX); ++ ++ args.ucAction = 1; ++ if (adjusted_mode->clock > 165000) ++ args.ucMisc = 1; ++ else ++ args.ucMisc = 0; ++ args.usPixelClock = cpu_to_le16(adjusted_mode->clock / 10); ++ ++ printk("executing set LVDS encoder\n"); ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++ ++static void radeon_lvtma_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc 
*radeon_crtc; ++ int index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); ++ uint32_t bios_2_scratch, bios_3_scratch; ++ int crtc_id = 0; ++ ++ if (encoder->crtc) { ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ++ crtc_id = radeon_crtc->crtc_id; ++ } ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ bios_2_scratch = RADEON_READ(R600_BIOS_2_SCRATCH); ++ bios_3_scratch = RADEON_READ(R600_BIOS_3_SCRATCH); ++ } else { ++ bios_2_scratch = RADEON_READ(RADEON_BIOS_2_SCRATCH); ++ bios_3_scratch = RADEON_READ(RADEON_BIOS_3_SCRATCH); ++ } ++ ++ bios_2_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE; ++ bios_3_scratch |= (crtc_id << 17); ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ atombios_display_device_control(encoder, index, ATOM_ENABLE); ++ bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE; ++ bios_3_scratch |= ATOM_S3_LCD1_ACTIVE; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ atombios_display_device_control(encoder, index, ATOM_DISABLE); ++ bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE; ++ bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE; ++ break; ++ } ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ RADEON_WRITE(R600_BIOS_2_SCRATCH, bios_2_scratch); ++ RADEON_WRITE(R600_BIOS_3_SCRATCH, bios_3_scratch); ++ } else { ++ RADEON_WRITE(RADEON_BIOS_2_SCRATCH, bios_2_scratch); ++ RADEON_WRITE(RADEON_BIOS_3_SCRATCH, bios_3_scratch); ++ } ++} ++ ++static bool radeon_lvtma_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ ++ radeon_encoder->flags &= ~RADEON_USE_RMX; ++ ++ if (radeon_encoder->rmx_type != RMX_OFF) ++ radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); ++ ++ return true; ++} ++ ++static void radeon_lvtma_prepare(struct drm_encoder *encoder) ++{ ++ radeon_atom_output_lock(encoder, true); ++ radeon_lvtma_dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++static void 
radeon_lvtma_commit(struct drm_encoder *encoder) ++{ ++ radeon_lvtma_dpms(encoder, DRM_MODE_DPMS_ON); ++ radeon_atom_output_lock(encoder, false); ++} ++ ++static const struct drm_encoder_helper_funcs radeon_atom_lvtma_helper_funcs = { ++ .dpms = radeon_lvtma_dpms, ++ .mode_fixup = radeon_lvtma_mode_fixup, ++ .prepare = radeon_lvtma_prepare, ++ .mode_set = radeon_lvtma_mode_set, ++ .commit = radeon_lvtma_commit, ++}; ++ ++void radeon_enc_destroy(struct drm_encoder *encoder) ++{ ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ drm_encoder_cleanup(encoder); ++ kfree(radeon_encoder); ++} ++ ++static const struct drm_encoder_funcs radeon_atom_lvtma_enc_funcs = { ++ .destroy = radeon_enc_destroy, ++}; ++ ++struct drm_encoder *radeon_encoder_lvtma_add(struct drm_device *dev, int bios_index) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct radeon_encoder *radeon_encoder; ++ struct drm_encoder *encoder; ++ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); ++ if (!radeon_encoder) { ++ return NULL; ++ } ++ ++ encoder = &radeon_encoder->base; ++ ++ /* don't put LVTMA on CRTC 1 - it should work but doesn't seem to */ ++ encoder->possible_crtcs = 0x1; ++ encoder->possible_clones = 0; ++ drm_encoder_init(dev, encoder, &radeon_atom_lvtma_enc_funcs, ++ DRM_MODE_ENCODER_LVDS); ++ ++ drm_encoder_helper_add(encoder, &radeon_atom_lvtma_helper_funcs); ++ radeon_encoder->atom_device = mode_info->bios_connector[bios_index].devices; ++ ++ /* TODO get the LVDS info from the BIOS for panel size etc. 
*/ ++ /* get the lvds info from the bios */ ++ radeon_atombios_get_lvds_info(radeon_encoder); ++ ++ /* LVDS gets default RMX full scaling */ ++ radeon_encoder->rmx_type = RMX_FULL; ++ ++ return encoder; ++} ++ ++static void radeon_atom_dac_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ struct radeon_crtc *radeon_crtc; ++ int atom_type = -1; ++ int index; ++ uint32_t bios_2_scratch, bios_3_scratch; ++ int crtc_id = 0; ++ ++ if (encoder->crtc) { ++ radeon_crtc = to_radeon_crtc(encoder->crtc); ++ crtc_id = radeon_crtc->crtc_id; ++ } ++ ++ atom_type = atom_dac_find_atom_type(radeon_encoder, NULL); ++ if (atom_type == -1) ++ return; ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ bios_2_scratch = RADEON_READ(R600_BIOS_2_SCRATCH); ++ bios_3_scratch = RADEON_READ(R600_BIOS_3_SCRATCH); ++ } else { ++ bios_2_scratch = RADEON_READ(RADEON_BIOS_2_SCRATCH); ++ bios_3_scratch = RADEON_READ(RADEON_BIOS_3_SCRATCH); ++ } ++ ++ switch(atom_type) { ++ case ATOM_DEVICE_CRT1_INDEX: ++ index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); ++ bios_2_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE; ++ bios_3_scratch |= (crtc_id << 16); ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE; ++ bios_3_scratch |= ATOM_S3_CRT1_ACTIVE; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE; ++ bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE; ++ break; ++ } ++ break; ++ case ATOM_DEVICE_CRT2_INDEX: ++ index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl); ++ bios_2_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE; ++ bios_3_scratch |= (crtc_id << 20); ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE; ++ bios_3_scratch |= ATOM_S3_CRT2_ACTIVE; ++ break; ++ case 
DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE; ++ bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE; ++ break; ++ } ++ break; ++ case ATOM_DEVICE_TV1_INDEX: ++ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); ++ bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE; ++ bios_3_scratch |= (crtc_id << 18); ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE; ++ bios_3_scratch |= ATOM_S3_TV1_ACTIVE; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE; ++ bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE; ++ break; ++ } ++ break; ++ case ATOM_DEVICE_CV_INDEX: ++ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); ++ bios_2_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE; ++ bios_3_scratch |= (crtc_id << 24); ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE; ++ bios_3_scratch |= ATOM_S3_CV_ACTIVE; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ bios_2_scratch |= ATOM_S2_CV_DPMS_STATE; ++ bios_3_scratch &= ~ATOM_S3_CV_ACTIVE; ++ break; ++ } ++ break; ++ default: ++ return; ++ } ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ atombios_display_device_control(encoder, index, ATOM_ENABLE); ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ atombios_display_device_control(encoder, index, ATOM_DISABLE); ++ break; ++ } ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ RADEON_WRITE(R600_BIOS_2_SCRATCH, bios_2_scratch); ++ RADEON_WRITE(R600_BIOS_3_SCRATCH, bios_3_scratch); ++ } else { ++ RADEON_WRITE(RADEON_BIOS_2_SCRATCH, bios_2_scratch); ++ RADEON_WRITE(RADEON_BIOS_3_SCRATCH, bios_3_scratch); ++ } ++} ++ ++static bool radeon_atom_dac_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ ++ 
/* hw bug */ ++ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ++ && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) ++ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; ++ ++ return true; ++} ++ ++static void radeon_atom_dac_prepare(struct drm_encoder *encoder) ++{ ++ radeon_atom_output_lock(encoder, true); ++ radeon_atom_dac_dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++static void radeon_atom_dac_commit(struct drm_encoder *encoder) ++{ ++ radeon_atom_dac_dpms(encoder, DRM_MODE_DPMS_ON); ++ radeon_atom_output_lock(encoder, false); ++} ++ ++static int atombios_dac_setup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ int atom_type) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ DAC_ENCODER_CONTROL_PS_ALLOCATION args; ++ int id = (radeon_encoder->type.dac == DAC_TVDAC); ++ int index; ++ ++ memset(&args, 0, sizeof(args)); ++ if (id == 0) ++ index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); ++ else ++ index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); ++ ++ args.ucAction = 1; ++ args.usPixelClock = cpu_to_le16(mode->clock / 10); ++ if ((atom_type == ATOM_DEVICE_CRT1_INDEX) || ++ (atom_type == ATOM_DEVICE_CRT2_INDEX)) ++ args.ucDacStandard = id ? ATOM_DAC2_PS2 : ATOM_DAC1_PS2; ++ else if (atom_type == ATOM_DEVICE_CV_INDEX) ++ args.ucDacStandard = id ? ATOM_DAC2_CV : ATOM_DAC1_CV; ++ else if (atom_type == ATOM_DEVICE_TV1_INDEX) ++ args.ucDacStandard = id ? 
ATOM_DAC2_NTSC : ATOM_DAC1_NTSC; ++ /* TODO PAL */ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++ ++ return 0; ++} ++ ++static int atombios_tv1_setup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ int atom_type) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ TV_ENCODER_CONTROL_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl); ++ ++ memset(&args, 0, sizeof(args)); ++ args.sTVEncoder.ucAction = 1; ++ if (atom_type == ATOM_DEVICE_CV_INDEX) ++ args.sTVEncoder.ucTvStandard = ATOM_TV_CV; ++ else { ++ // TODO PAL ++ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; ++ } ++ ++ args.sTVEncoder.usPixelClock = cpu_to_le16(mode->clock / 10); ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++ return 0; ++} ++ ++static void radeon_atom_dac_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ int atom_type = -1; ++ ++ atom_type = atom_dac_find_atom_type(radeon_encoder, NULL); ++ if (atom_type == -1) ++ return; ++ ++ atombios_scaler_setup(encoder, mode); ++ atombios_set_crtc_source(encoder, atom_type); ++ ++ atombios_dac_setup(encoder, adjusted_mode, atom_type); ++ if ((atom_type == ATOM_DEVICE_TV1_INDEX) || ++ (atom_type == ATOM_DEVICE_CV_INDEX)) ++ atombios_tv1_setup(encoder, adjusted_mode, atom_type); ++ ++} ++ ++static bool atom_dac_load_detect(struct drm_encoder *encoder, int atom_devices) ++{ ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ 
DAC_LOAD_DETECTION_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection); ++ ++ memset(&args, 0, sizeof(args)); ++ args.sDacload.ucMisc = 0; ++ args.sDacload.ucDacType = (radeon_encoder->type.dac == DAC_PRIMARY) ? ATOM_DAC_A : ATOM_DAC_B; ++ ++ if (atom_devices & ATOM_DEVICE_CRT1_SUPPORT) ++ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT); ++ else if (atom_devices & ATOM_DEVICE_CRT2_SUPPORT) ++ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT); ++ else if (atom_devices & ATOM_DEVICE_CV_SUPPORT) { ++ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT); ++ if (radeon_is_dce3(dev_priv)) ++ args.sDacload.ucMisc = 1; ++ } else if (atom_devices & ATOM_DEVICE_TV1_SUPPORT) { ++ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT); ++ if (radeon_is_dce3(dev_priv)) ++ args.sDacload.ucMisc = 1; ++ } else ++ return false; ++ ++ DRM_DEBUG("writing %x %x\n", args.sDacload.usDeviceID, args.sDacload.ucDacType); ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++ return true; ++} ++ ++static enum drm_connector_status radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ int atom_type = -1; ++ uint32_t bios_0_scratch; ++ ++ atom_type = atom_dac_find_atom_type(radeon_encoder, connector); ++ if (atom_type == -1) { ++ DRM_DEBUG("exit after find \n"); ++ return connector_status_unknown; ++ } ++ ++ if(!atom_dac_load_detect(encoder, (1 << atom_type))) { ++ DRM_DEBUG("detect returned false \n"); ++ return connector_status_unknown; ++ } ++ ++ ++ if (dev_priv->chip_family >= CHIP_R600) ++ bios_0_scratch = RADEON_READ(R600_BIOS_0_SCRATCH); ++ else ++ bios_0_scratch = RADEON_READ(RADEON_BIOS_0_SCRATCH); ++ ++ DRM_DEBUG("Bios 0 scratch %x\n", bios_0_scratch); ++ if 
(radeon_encoder->atom_device & ATOM_DEVICE_CRT1_SUPPORT) { ++ if (bios_0_scratch & ATOM_S0_CRT1_MASK) ++ return connector_status_connected; ++ } else if (radeon_encoder->atom_device & ATOM_DEVICE_CRT2_SUPPORT) { ++ if (bios_0_scratch & ATOM_S0_CRT2_MASK) ++ return connector_status_connected; ++ } else if (radeon_encoder->atom_device & ATOM_DEVICE_CV_SUPPORT) { ++ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) ++ return connector_status_connected; ++ } else if (radeon_encoder->atom_device & ATOM_DEVICE_TV1_SUPPORT) { ++ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) ++ return connector_status_connected; // CTV ++ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) ++ return connector_status_connected; // STV ++ } ++ return connector_status_disconnected; ++} ++ ++static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { ++ .dpms = radeon_atom_dac_dpms, ++ .mode_fixup = radeon_atom_dac_mode_fixup, ++ .prepare = radeon_atom_dac_prepare, ++ .mode_set = radeon_atom_dac_mode_set, ++ .commit = radeon_atom_dac_commit, ++ .detect = radeon_atom_dac_detect, ++}; ++ ++static const struct drm_encoder_funcs radeon_atom_dac_enc_funcs = { ++ . 
destroy = radeon_enc_destroy, ++}; ++ ++ ++static void atombios_tmds1_setup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ TMDS1_ENCODER_CONTROL_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl); ++ ++ memset(&args, 0, sizeof(args)); ++ args.ucAction = 1; ++ if (mode->clock > 165000) ++ args.ucMisc = 1; ++ else ++ args.ucMisc = 0; ++ ++ args.usPixelClock = cpu_to_le16(mode->clock / 10); ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++static void atombios_tmds2_setup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ TMDS2_ENCODER_CONTROL_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl); ++ ++ memset(&args, 0, sizeof(args)); ++ args.ucAction = 1; ++ if (mode->clock > 165000) ++ args.ucMisc = 1; ++ else ++ args.ucMisc = 0; ++ ++ args.usPixelClock = cpu_to_le16(mode->clock / 10); ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++ ++void atombios_ext_tmds_setup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); ++ ++ memset(&args, 0, sizeof(args)); ++ args.sXTmdsEncoder.ucEnable = 1; ++ ++ if (mode->clock > 165000) ++ args.sXTmdsEncoder.ucMisc = 1; ++ else ++ args.sXTmdsEncoder.ucMisc = 0; ++ ++ // TODO 6-bit DAC ++// 
args.usPixelClock = cpu_to_le16(mode->clock / 10); ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++static void atombios_dig1_setup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ DIG_ENCODER_CONTROL_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); ++ ++ args.ucAction = 1; ++ args.usPixelClock = mode->clock / 10; ++ args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1; ++ ++ // TODO coherent mode ++// if (encoder->coherent_mode) ++// args.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; ++ ++ if (mode->clock > 165000) { ++ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B; ++ args.ucLaneNum = 8; ++ } else { ++ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; ++ args.ucLaneNum = 4; ++ } ++ ++ // TODO Encoder MODE ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++static void atombios_ddia_setup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ DVO_ENCODER_CONTROL_PS_ALLOCATION args; ++ int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); ++ ++ args.sDVOEncoder.ucAction = ATOM_ENABLE; ++ args.sDVOEncoder.usPixelClock = mode->clock / 10; ++ ++ if (mode->clock > 165000) ++ args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL; ++ else ++ args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = 0; ++ ++ atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++struct drm_encoder *radeon_encoder_atom_dac_add(struct drm_device *dev, int bios_index, int dac_type, int with_tv) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct radeon_encoder *radeon_encoder = NULL; ++ struct 
drm_encoder *encoder; ++ int type = with_tv ? DRM_MODE_ENCODER_TVDAC : DRM_MODE_ENCODER_DAC; ++ int found = 0; ++ int digital_enc_mask = ~(ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ++ ATOM_DEVICE_LCD1_SUPPORT); ++ /* we may already have added this encoder */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC || ++ encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) ++ continue; ++ ++ radeon_encoder = to_radeon_encoder(encoder); ++ if (radeon_encoder->type.dac == dac_type) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (found) { ++ /* upgrade to a TV controlling DAC */ ++ if (type == DRM_MODE_ENCODER_TVDAC) ++ encoder->encoder_type = type; ++ radeon_encoder->atom_device |= mode_info->bios_connector[bios_index].devices; ++ radeon_encoder->atom_device &= digital_enc_mask; ++ return encoder; ++ } ++ ++ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); ++ if (!radeon_encoder) { ++ return NULL; ++ } ++ ++ encoder = &radeon_encoder->base; ++ ++ encoder->possible_crtcs = 0x3; ++ encoder->possible_clones = 0; ++ drm_encoder_init(dev, encoder, &radeon_atom_dac_enc_funcs, ++ type); ++ ++ drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); ++ radeon_encoder->type.dac = dac_type; ++ radeon_encoder->atom_device = mode_info->bios_connector[bios_index].devices; ++ ++ /* mask off any digital encoders */ ++ radeon_encoder->atom_device &= digital_enc_mask; ++ return encoder; ++} ++ ++static void radeon_atom_tmds_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ struct radeon_crtc *radeon_crtc = NULL; ++ int crtc_id = 0; ++ int atom_type = -1; ++ int index = -1; ++ uint32_t bios_2_scratch, bios_3_scratch; ++ ++ if (encoder->crtc) { ++ radeon_crtc = to_radeon_crtc(encoder->crtc); 
++ crtc_id = radeon_crtc->crtc_id; ++ } else if (mode == DRM_MODE_DPMS_ON) ++ return; ++ ++ if (radeon_encoder->atom_device & ATOM_DEVICE_DFP1_SUPPORT) ++ atom_type = ATOM_DEVICE_DFP1_INDEX; ++ if (radeon_encoder->atom_device & ATOM_DEVICE_DFP2_SUPPORT) ++ atom_type = ATOM_DEVICE_DFP2_INDEX; ++ if (radeon_encoder->atom_device & ATOM_DEVICE_DFP3_SUPPORT) ++ atom_type = ATOM_DEVICE_DFP3_INDEX; ++ ++ if (atom_type == -1) ++ return; ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ bios_2_scratch = RADEON_READ(R600_BIOS_2_SCRATCH); ++ bios_3_scratch = RADEON_READ(R600_BIOS_3_SCRATCH); ++ } else { ++ bios_2_scratch = RADEON_READ(RADEON_BIOS_2_SCRATCH); ++ bios_3_scratch = RADEON_READ(RADEON_BIOS_3_SCRATCH); ++ } ++ ++ switch(atom_type) { ++ case ATOM_DEVICE_DFP1_INDEX: ++ index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl); ++ bios_2_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE; ++ bios_3_scratch |= (crtc_id << 19); ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE; ++ bios_3_scratch |= ATOM_S3_DFP1_ACTIVE; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE; ++ bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE; ++ break; ++ } ++ break; ++ case ATOM_DEVICE_DFP2_INDEX: ++ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); ++ bios_2_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE; ++ bios_3_scratch |= (crtc_id << 23); ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE; ++ bios_3_scratch |= ATOM_S3_DFP2_ACTIVE; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE; ++ bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE; ++ break; ++ } ++ break; ++ case ATOM_DEVICE_DFP3_INDEX: ++ index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl); ++ bios_2_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE; ++ bios_3_scratch |= (crtc_id << 25); ++ switch(mode) 
{ ++ case DRM_MODE_DPMS_ON: ++ bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE; ++ bios_3_scratch |= ATOM_S3_DFP3_ACTIVE; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE; ++ bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE; ++ break; ++ } ++ break; ++ } ++ ++ if (index == -1) ++ return; ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ atombios_display_device_control(encoder, index, ATOM_ENABLE); ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ atombios_display_device_control(encoder, index, ATOM_DISABLE); ++ break; ++ } ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ RADEON_WRITE(R600_BIOS_2_SCRATCH, bios_2_scratch); ++ RADEON_WRITE(R600_BIOS_3_SCRATCH, bios_3_scratch); ++ } else { ++ RADEON_WRITE(RADEON_BIOS_2_SCRATCH, bios_2_scratch); ++ RADEON_WRITE(RADEON_BIOS_3_SCRATCH, bios_3_scratch); ++ } ++} ++ ++static bool radeon_atom_tmds_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ ++ /* hw bug */ ++ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ++ && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) ++ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; ++ ++ return true; ++} ++ ++static void radeon_atom_tmds_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ int atom_type; ++ ++ if (radeon_encoder->atom_device & ATOM_DEVICE_DFP1_SUPPORT) ++ atom_type = ATOM_DEVICE_DFP1_INDEX; ++ if (radeon_encoder->atom_device & ATOM_DEVICE_DFP2_SUPPORT) ++ atom_type = ATOM_DEVICE_DFP2_INDEX; ++ if (radeon_encoder->atom_device & ATOM_DEVICE_DFP3_SUPPORT) ++ atom_type = ATOM_DEVICE_DFP3_INDEX; ++ ++ 
atombios_scaler_setup(encoder, mode); ++ atombios_set_crtc_source(encoder, atom_type); ++ ++ if (atom_type == ATOM_DEVICE_DFP1_INDEX) ++ atombios_tmds1_setup(encoder, adjusted_mode); ++ if (atom_type == ATOM_DEVICE_DFP2_INDEX) { ++ if ((dev_priv->chip_family == CHIP_RS600) || ++ (dev_priv->chip_family == CHIP_RS690) || ++ (dev_priv->chip_family == CHIP_RS740)) ++ atombios_ddia_setup(encoder, adjusted_mode); ++ else ++ atombios_ext_tmds_setup(encoder, adjusted_mode); ++ } ++ if (atom_type == ATOM_DEVICE_DFP3_INDEX) ++ atombios_tmds2_setup(encoder, adjusted_mode); ++ radeon_dfp_disable_dither(encoder, atom_type); ++ ++ ++} ++ ++static void radeon_atom_tmds_prepare(struct drm_encoder *encoder) ++{ ++ radeon_atom_output_lock(encoder, true); ++ radeon_atom_tmds_dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++static void radeon_atom_tmds_commit(struct drm_encoder *encoder) ++{ ++ radeon_atom_tmds_dpms(encoder, DRM_MODE_DPMS_ON); ++ radeon_atom_output_lock(encoder, false); ++} ++ ++static const struct drm_encoder_helper_funcs radeon_atom_tmds_helper_funcs = { ++ .dpms = radeon_atom_tmds_dpms, ++ .mode_fixup = radeon_atom_tmds_mode_fixup, ++ .prepare = radeon_atom_tmds_prepare, ++ .mode_set = radeon_atom_tmds_mode_set, ++ .commit = radeon_atom_tmds_commit, ++ /* no detect for TMDS */ ++}; ++ ++static const struct drm_encoder_funcs radeon_atom_tmds_enc_funcs = { ++ . 
destroy = radeon_enc_destroy, ++}; ++ ++struct drm_encoder *radeon_encoder_atom_tmds_add(struct drm_device *dev, int bios_index, int tmds_type) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_mode_info *mode_info = &dev_priv->mode_info; ++ struct radeon_encoder *radeon_encoder = NULL; ++ struct drm_encoder *encoder; ++ int analog_enc_mask = ~(ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT); ++ ++ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); ++ if (!radeon_encoder) { ++ return NULL; ++ } ++ ++ encoder = &radeon_encoder->base; ++ ++ encoder->possible_crtcs = 0x3; ++ encoder->possible_clones = 0; ++ drm_encoder_init(dev, encoder, &radeon_atom_tmds_enc_funcs, ++ DRM_MODE_ENCODER_TMDS); ++ ++ drm_encoder_helper_add(encoder, &radeon_atom_tmds_helper_funcs); ++ ++ radeon_encoder->atom_device = mode_info->bios_connector[bios_index].devices; ++ ++ /* mask off any analog encoders */ ++ radeon_encoder->atom_device &= analog_enc_mask; ++ return encoder; ++} +diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c +new file mode 100644 +index 0000000..244b066 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_fb.c +@@ -0,0 +1,927 @@ ++/* ++ * Copyright © 2007 David Airlie ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * David Airlie ++ */ ++ /* ++ * Modularization ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_crtc.h" ++#include "drm_crtc_helper.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++struct radeonfb_par { ++ struct drm_device *dev; ++ struct drm_display_mode *our_mode; ++ struct radeon_framebuffer *radeon_fb; ++ int crtc_count; ++ /* crtc currently bound to this */ ++ uint32_t crtc_ids[2]; ++}; ++/* ++static int ++var_to_refresh(const struct fb_var_screeninfo *var) ++{ ++ int xtot = var->xres + var->left_margin + var->right_margin + ++ var->hsync_len; ++ int ytot = var->yres + var->upper_margin + var->lower_margin + ++ var->vsync_len; ++ ++ return (1000000000 / var->pixclock * 1000 + 500) / xtot / ytot; ++}*/ ++ ++static int radeonfb_setcolreg(unsigned regno, unsigned red, unsigned green, ++ unsigned blue, unsigned transp, ++ struct fb_info *info) ++{ ++ struct radeonfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct drm_crtc *crtc; ++ int i; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_mode_set *modeset = &radeon_crtc->mode_set; ++ struct drm_framebuffer *fb = modeset->fb; ++ ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ if (i == par->crtc_count) 
++ continue; ++ ++ ++ if (regno > 255) ++ return 1; ++ ++ if (fb->depth == 8) { ++ radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno); ++ return 0; ++ } ++ ++ if (regno < 16) { ++ switch (fb->depth) { ++ case 15: ++ fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | ++ ((green & 0xf800) >> 6) | ++ ((blue & 0xf800) >> 11); ++ break; ++ case 16: ++ fb->pseudo_palette[regno] = (red & 0xf800) | ++ ((green & 0xfc00) >> 5) | ++ ((blue & 0xf800) >> 11); ++ break; ++ case 24: ++ case 32: ++ fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | ++ (green & 0xff00) | ++ ((blue & 0xff00) >> 8); ++ break; ++ } ++ } ++ } ++ return 0; ++} ++ ++static int radeonfb_check_var(struct fb_var_screeninfo *var, ++ struct fb_info *info) ++{ ++ struct radeonfb_par *par = info->par; ++ struct radeon_framebuffer *radeon_fb = par->radeon_fb; ++ struct drm_framebuffer *fb = &radeon_fb->base; ++ int depth; ++ ++ if (var->pixclock == -1 || !var->pixclock) ++ return -EINVAL; ++ ++ /* Need to resize the fb object !!! */ ++ if (var->xres > fb->width || var->yres > fb->height) { ++ DRM_ERROR("Requested width/height is greater than current fb object %dx%d > %dx%d\n",var->xres,var->yres,fb->width,fb->height); ++ DRM_ERROR("Need resizing code.\n"); ++ return -EINVAL; ++ } ++ ++ switch (var->bits_per_pixel) { ++ case 16: ++ depth = (var->green.length == 6) ? 16 : 15; ++ break; ++ case 32: ++ depth = (var->transp.length > 0) ? 
32 : 24; ++ break; ++ default: ++ depth = var->bits_per_pixel; ++ break; ++ } ++ ++ switch (depth) { ++ case 8: ++ var->red.offset = 0; ++ var->green.offset = 0; ++ var->blue.offset = 0; ++ var->red.length = 8; ++ var->green.length = 8; ++ var->blue.length = 8; ++ var->transp.length = 0; ++ var->transp.offset = 0; ++ break; ++ case 15: ++ var->red.offset = 10; ++ var->green.offset = 5; ++ var->blue.offset = 0; ++ var->red.length = 5; ++ var->green.length = 5; ++ var->blue.length = 5; ++ var->transp.length = 1; ++ var->transp.offset = 15; ++ break; ++ case 16: ++ var->red.offset = 11; ++ var->green.offset = 5; ++ var->blue.offset = 0; ++ var->red.length = 5; ++ var->green.length = 6; ++ var->blue.length = 5; ++ var->transp.length = 0; ++ var->transp.offset = 0; ++ break; ++ case 24: ++ var->red.offset = 16; ++ var->green.offset = 8; ++ var->blue.offset = 0; ++ var->red.length = 8; ++ var->green.length = 8; ++ var->blue.length = 8; ++ var->transp.length = 0; ++ var->transp.offset = 0; ++ break; ++ case 32: ++ var->red.offset = 16; ++ var->green.offset = 8; ++ var->blue.offset = 0; ++ var->red.length = 8; ++ var->green.length = 8; ++ var->blue.length = 8; ++ var->transp.length = 8; ++ var->transp.offset = 24; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* this will let fbcon do the mode init */ ++/* FIXME: take mode config lock? 
*/ ++static int radeonfb_set_par(struct fb_info *info) ++{ ++ struct radeonfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct fb_var_screeninfo *var = &info->var; ++ int i; ++ ++ DRM_DEBUG("%d %d\n", var->xres, var->pixclock); ++ ++ if (var->pixclock != -1) { ++ ++ DRM_ERROR("PIXEL CLCOK SET\n"); ++ return -EINVAL; ++ } else { ++ struct drm_crtc *crtc; ++ int ret; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ if (i == par->crtc_count) ++ continue; ++ ++ if (crtc->fb == radeon_crtc->mode_set.fb) { ++ ret = crtc->funcs->set_config(&radeon_crtc->mode_set); ++ if (ret) ++ return ret; ++ } ++ } ++ return 0; ++ } ++} ++ ++static int radeonfb_pan_display(struct fb_var_screeninfo *var, ++ struct fb_info *info) ++{ ++ struct radeonfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct drm_mode_set *modeset; ++ struct drm_crtc *crtc; ++ struct radeon_crtc *radeon_crtc; ++ int ret = 0; ++ int i; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ if (i == par->crtc_count) ++ continue; ++ ++ radeon_crtc = to_radeon_crtc(crtc); ++ modeset = &radeon_crtc->mode_set; ++ ++ modeset->x = var->xoffset; ++ modeset->y = var->yoffset; ++ ++ if (modeset->num_connectors) { ++ ret = crtc->funcs->set_config(modeset); ++ ++ if (!ret) { ++ info->var.xoffset = var->xoffset; ++ info->var.yoffset = var->yoffset; ++ } ++ } ++ } ++ ++ return ret; ++} ++ ++static void radeonfb_on(struct fb_info *info) ++{ ++ struct radeonfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct drm_crtc *crtc; ++ struct drm_encoder *encoder; ++ int i; ++ ++ /* ++ * For each CRTC in this fb, find all associated encoders ++ * and turn them off, then turn off the 
CRTC. ++ */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; ++ ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); ++ ++ /* Found a CRTC on this fb, now find encoders */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ if (encoder->crtc == crtc) { ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ encoder_funcs = encoder->helper_private; ++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); ++ } ++ } ++ } ++} ++ ++static void radeonfb_off(struct fb_info *info, int dpms_mode) ++{ ++ struct radeonfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct drm_crtc *crtc; ++ struct drm_encoder *encoder; ++ int i; ++ ++ /* ++ * For each CRTC in this fb, find all associated encoders ++ * and turn them off, then turn off the CRTC. ++ */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; ++ ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ /* Found a CRTC on this fb, now find encoders */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ if (encoder->crtc == crtc) { ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ encoder_funcs = encoder->helper_private; ++ encoder_funcs->dpms(encoder, dpms_mode); ++ } ++ } ++ if (dpms_mode == DRM_MODE_DPMS_OFF) ++ crtc_funcs->dpms(crtc, dpms_mode); ++ } ++} ++ ++int radeonfb_blank(int blank, struct fb_info *info) ++{ ++ switch (blank) { ++ case FB_BLANK_UNBLANK: ++ radeonfb_on(info); ++ break; ++ case FB_BLANK_NORMAL: ++ radeonfb_off(info, DRM_MODE_DPMS_STANDBY); ++ break; ++ case FB_BLANK_HSYNC_SUSPEND: ++ radeonfb_off(info, DRM_MODE_DPMS_STANDBY); ++ break; ++ case FB_BLANK_VSYNC_SUSPEND: ++ radeonfb_off(info, DRM_MODE_DPMS_SUSPEND); ++ break; ++ case 
FB_BLANK_POWERDOWN: ++ radeonfb_off(info, DRM_MODE_DPMS_OFF); ++ break; ++ } ++ return 0; ++} ++ ++static struct fb_ops radeonfb_ops = { ++ .owner = THIS_MODULE, ++ //.fb_open = radeonfb_open, ++ //.fb_read = radeonfb_read, ++ //.fb_write = radeonfb_write, ++ //.fb_release = radeonfb_release, ++ //.fb_ioctl = radeonfb_ioctl, ++ .fb_check_var = radeonfb_check_var, ++ .fb_set_par = radeonfb_set_par, ++ .fb_setcolreg = radeonfb_setcolreg, ++ .fb_fillrect = cfb_fillrect, ++ .fb_copyarea = cfb_copyarea, //radeonfb_copyarea, ++ .fb_imageblit = cfb_imageblit, //radeonfb_imageblit, ++ .fb_pan_display = radeonfb_pan_display, ++ .fb_blank = radeonfb_blank, ++}; ++ ++/** ++ * Curretly it is assumed that the old framebuffer is reused. ++ * ++ * LOCKING ++ * caller should hold the mode config lock. ++ * ++ */ ++int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc) ++{ ++ struct fb_info *info; ++ struct drm_framebuffer *fb; ++ struct drm_display_mode *mode = crtc->desired_mode; ++ ++ fb = crtc->fb; ++ if (!fb) ++ return 1; ++ ++ info = fb->fbdev; ++ if (!info) ++ return 1; ++ ++ if (!mode) ++ return 1; ++ ++ info->var.xres = mode->hdisplay; ++ info->var.right_margin = mode->hsync_start - mode->hdisplay; ++ info->var.hsync_len = mode->hsync_end - mode->hsync_start; ++ info->var.left_margin = mode->htotal - mode->hsync_end; ++ info->var.yres = mode->vdisplay; ++ info->var.lower_margin = mode->vsync_start - mode->vdisplay; ++ info->var.vsync_len = mode->vsync_end - mode->vsync_start; ++ info->var.upper_margin = mode->vtotal - mode->vsync_end; ++ info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; ++ /* avoid overflow */ ++ info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; ++ ++ return 0; ++} ++EXPORT_SYMBOL(radeonfb_resize); ++ ++static struct drm_mode_set panic_mode; ++ ++int radeonfb_panic(struct notifier_block *n, unsigned long ununsed, ++ void *panic_str) ++{ ++ DRM_ERROR("panic occurred, switching back to text console\n"); ++ 
drm_crtc_helper_set_config(&panic_mode); ++ ++ return 0; ++} ++EXPORT_SYMBOL(radeonfb_panic); ++ ++static struct notifier_block paniced = { ++ .notifier_call = radeonfb_panic, ++}; ++ ++static int radeon_align_pitch(struct drm_device *dev, int width, int bpp) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int aligned = width; ++ int align_large = (radeon_is_avivo(dev_priv)); ++ int pitch_mask = 0; ++ ++ switch(bpp / 8) { ++ case 1: pitch_mask = align_large ? 255 : 127; break; ++ case 2: pitch_mask = align_large ? 127 : 31; break; ++ case 3: ++ case 4: pitch_mask = align_large ? 63 : 15; break; ++ } ++ ++ aligned += pitch_mask; ++ aligned &= ~pitch_mask; ++ return aligned; ++} ++ ++int radeonfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height, ++ uint32_t surface_width, uint32_t surface_height, ++ struct radeon_framebuffer **radeon_fb_p) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct fb_info *info; ++ struct radeonfb_par *par; ++ struct drm_framebuffer *fb; ++ struct radeon_framebuffer *radeon_fb; ++ struct drm_mode_fb_cmd mode_cmd; ++ struct drm_gem_object *fbo = NULL; ++ struct drm_radeon_gem_object *obj_priv; ++ struct device *device = &dev->pdev->dev; ++ int size, aligned_size, ret; ++ ++ mode_cmd.width = surface_width;/* crtc->desired_mode->hdisplay; */ ++ mode_cmd.height = surface_height;/* crtc->desired_mode->vdisplay; */ ++ ++ mode_cmd.bpp = 32; ++ /* need to align pitch with crtc limits */ ++ mode_cmd.pitch = radeon_align_pitch(dev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8); ++ mode_cmd.depth = 24; ++ ++ size = mode_cmd.pitch * mode_cmd.height; ++ aligned_size = ALIGN(size, PAGE_SIZE); ++ ++ fbo = radeon_gem_object_alloc(dev, aligned_size, 1, RADEON_GEM_DOMAIN_VRAM, 0); ++ if (!fbo) { ++ printk(KERN_ERR "failed to allocate framebuffer\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ obj_priv = fbo->driver_private; ++ ++ ret = radeon_gem_object_pin(fbo, PAGE_SIZE, 
RADEON_GEM_DOMAIN_VRAM); ++ if (ret) { ++ DRM_ERROR("failed to pin fb: %d\n", ret); ++ mutex_lock(&dev->struct_mutex); ++ goto out_unref; ++ } ++ ++ dev_priv->mm.vram_visible -= aligned_size; ++ ++ mutex_lock(&dev->struct_mutex); ++ fb = radeon_framebuffer_create(dev, &mode_cmd, fbo); ++ if (!fb) { ++ DRM_ERROR("failed to allocate fb.\n"); ++ ret = -ENOMEM; ++ goto out_unref; ++ } ++ ++ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); ++ ++ radeon_fb = to_radeon_framebuffer(fb); ++ *radeon_fb_p = radeon_fb; ++ ++ info = framebuffer_alloc(sizeof(struct radeonfb_par), device); ++ if (!info) { ++ ret = -ENOMEM; ++ goto out_unref; ++ } ++ ++ par = info->par; ++ ++ strcpy(info->fix.id, "radeondrmfb"); ++ info->fix.type = FB_TYPE_PACKED_PIXELS; ++ info->fix.visual = FB_VISUAL_TRUECOLOR; ++ info->fix.type_aux = 0; ++ info->fix.xpanstep = 1; /* doing it in hw */ ++ info->fix.ypanstep = 1; /* doing it in hw */ ++ info->fix.ywrapstep = 0; ++ info->fix.accel = FB_ACCEL_I830; ++ info->fix.type_aux = 0; ++ ++ info->flags = FBINFO_DEFAULT; ++ ++ info->fbops = &radeonfb_ops; ++ ++ info->fix.line_length = fb->pitch; ++ info->fix.smem_start = dev->mode_config.fb_base + obj_priv->bo->offset; ++ info->fix.smem_len = size; ++ ++ info->flags = FBINFO_DEFAULT; ++ ++ ret = drm_bo_kmap(obj_priv->bo, 0, PAGE_ALIGN(size) >> PAGE_SHIFT, ++ &radeon_fb->kmap_obj); ++ info->screen_base = radeon_fb->kmap_obj.virtual; ++ if (!info->screen_base) { ++ ret = -ENOSPC; ++ goto out_unref; ++ } ++ info->screen_size = size; ++ ++ memset(info->screen_base, 0, size); ++ ++ info->pseudo_palette = fb->pseudo_palette; ++ info->var.xres_virtual = fb->width; ++ info->var.yres_virtual = fb->height; ++ info->var.bits_per_pixel = fb->bits_per_pixel; ++ info->var.xoffset = 0; ++ info->var.yoffset = 0; ++ info->var.activate = FB_ACTIVATE_NOW; ++ info->var.height = -1; ++ info->var.width = -1; ++ ++ info->var.xres = fb_width; ++ info->var.yres = fb_height; ++ ++ info->fix.mmio_start = 
pci_resource_start(dev->pdev, 2); ++ info->fix.mmio_len = pci_resource_len(dev->pdev, 2); ++ ++ info->pixmap.size = 64*1024; ++ info->pixmap.buf_align = 8; ++ info->pixmap.access_align = 32; ++ info->pixmap.flags = FB_PIXMAP_SYSTEM; ++ info->pixmap.scan_align = 1; ++ ++ DRM_DEBUG("fb depth is %d\n", fb->depth); ++ DRM_DEBUG(" pitch is %d\n", fb->pitch); ++ switch(fb->depth) { ++ case 8: ++ info->var.red.offset = 0; ++ info->var.green.offset = 0; ++ info->var.blue.offset = 0; ++ info->var.red.length = 8; /* 8bit DAC */ ++ info->var.green.length = 8; ++ info->var.blue.length = 8; ++ info->var.transp.offset = 0; ++ info->var.transp.length = 0; ++ break; ++ case 15: ++ info->var.red.offset = 10; ++ info->var.green.offset = 5; ++ info->var.blue.offset = 0; ++ info->var.red.length = 5; ++ info->var.green.length = 5; ++ info->var.blue.length = 5; ++ info->var.transp.offset = 15; ++ info->var.transp.length = 1; ++ break; ++ case 16: ++ info->var.red.offset = 11; ++ info->var.green.offset = 5; ++ info->var.blue.offset = 0; ++ info->var.red.length = 5; ++ info->var.green.length = 6; ++ info->var.blue.length = 5; ++ info->var.transp.offset = 0; ++ break; ++ case 24: ++ info->var.red.offset = 16; ++ info->var.green.offset = 8; ++ info->var.blue.offset = 0; ++ info->var.red.length = 8; ++ info->var.green.length = 8; ++ info->var.blue.length = 8; ++ info->var.transp.offset = 0; ++ info->var.transp.length = 0; ++ break; ++ case 32: ++ info->var.red.offset = 16; ++ info->var.green.offset = 8; ++ info->var.blue.offset = 0; ++ info->var.red.length = 8; ++ info->var.green.length = 8; ++ info->var.blue.length = 8; ++ info->var.transp.offset = 24; ++ info->var.transp.length = 8; ++ break; ++ default: ++ break; ++ } ++ ++ fb->fbdev = info; ++ ++ par->radeon_fb = radeon_fb; ++ par->dev = dev; ++ ++ /* To allow resizeing without swapping buffers */ ++ printk("allocated %p %dx%d fb: 0x%08x, bo %p\n", dev, radeon_fb->base.width, ++ radeon_fb->base.height, obj_priv->bo->offset, fbo); ++ ++ 
mutex_unlock(&dev->struct_mutex); ++ return 0; ++ ++out_unref: ++ drm_gem_object_unreference(fbo); ++ mutex_unlock(&dev->struct_mutex); ++out: ++ return ret; ++} ++ ++static int radeonfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct radeon_framebuffer *radeon_fb; ++ struct drm_framebuffer *fb; ++ struct drm_connector *connector; ++ struct fb_info *info; ++ struct radeonfb_par *par; ++ struct drm_mode_set *modeset; ++ unsigned int width, height; ++ int new_fb = 0; ++ int ret, i, conn_count; ++ ++ if (!drm_helper_crtc_in_use(crtc)) ++ return 0; ++ ++ if (!crtc->desired_mode) ++ return 0; ++ ++ width = crtc->desired_mode->hdisplay; ++ height = crtc->desired_mode->vdisplay; ++ ++ /* is there an fb bound to this crtc already */ ++ if (!radeon_crtc->mode_set.fb) { ++ ret = radeonfb_create(dev, width, height, width, height, &radeon_fb); ++ if (ret) ++ return -EINVAL; ++ new_fb = 1; ++ } else { ++ fb = radeon_crtc->mode_set.fb; ++ radeon_fb = to_radeon_framebuffer(fb); ++ if ((radeon_fb->base.width < width) || (radeon_fb->base.height < height)) ++ return -EINVAL; ++ } ++ ++ info = radeon_fb->base.fbdev; ++ par = info->par; ++ ++ modeset = &radeon_crtc->mode_set; ++ modeset->fb = &radeon_fb->base; ++ conn_count = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ if (connector->encoder) ++ if (connector->encoder->crtc == modeset->crtc) { ++ modeset->connectors[conn_count] = connector; ++ conn_count++; ++ if (conn_count > RADEONFB_CONN_LIMIT) ++ BUG(); ++ } ++ } ++ ++ for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++) ++ modeset->connectors[i] = NULL; ++ ++ par->crtc_ids[0] = crtc->base.id; ++ ++ modeset->num_connectors = conn_count; ++ if (modeset->mode != modeset->crtc->desired_mode) ++ modeset->mode = modeset->crtc->desired_mode; ++ ++ par->crtc_count = 1; ++ ++ if (new_fb) { ++ info->var.pixclock = -1; ++ if (register_framebuffer(info) < 0) ++ 
return -EINVAL; ++ } else ++ radeonfb_set_par(info); ++ ++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, ++ info->fix.id); ++ ++ /* Switch back to kernel console on panic */ ++ panic_mode = *modeset; ++ atomic_notifier_chain_register(&panic_notifier_list, &paniced); ++ printk(KERN_INFO "registered panic notifier\n"); ++ ++ return 0; ++} ++ ++static int radeonfb_multi_fb_probe(struct drm_device *dev) ++{ ++ ++ struct drm_crtc *crtc; ++ int ret = 0; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ ret = radeonfb_multi_fb_probe_crtc(dev, crtc); ++ if (ret) ++ return ret; ++ } ++ return ret; ++} ++ ++static int radeonfb_single_fb_probe(struct drm_device *dev) ++{ ++ struct drm_crtc *crtc; ++ struct drm_connector *connector; ++ unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; ++ unsigned int surface_width = 0, surface_height = 0; ++ int new_fb = 0; ++ int crtc_count = 0; ++ int ret, i, conn_count = 0; ++ struct radeon_framebuffer *radeon_fb; ++ struct fb_info *info; ++ struct radeonfb_par *par; ++ struct drm_mode_set *modeset = NULL; ++ ++ DRM_DEBUG("\n"); ++ /* first up get a count of crtcs now in use and new min/maxes width/heights */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ if (drm_helper_crtc_in_use(crtc)) { ++ if (crtc->desired_mode) { ++ if (crtc->desired_mode->hdisplay < fb_width) ++ fb_width = crtc->desired_mode->hdisplay; ++ ++ if (crtc->desired_mode->vdisplay < fb_height) ++ fb_height = crtc->desired_mode->vdisplay; ++ ++ if (crtc->desired_mode->hdisplay > surface_width) ++ surface_width = crtc->desired_mode->hdisplay; ++ ++ if (crtc->desired_mode->vdisplay > surface_height) ++ surface_height = crtc->desired_mode->vdisplay; ++ ++ } ++ crtc_count++; ++ } ++ } ++ ++ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { ++ /* hmm everyone went away - assume VGA cable just fell out ++ and will come back later. */ ++ return 0; ++ } ++ ++ /* do we have an fb already? 
*/ ++ if (list_empty(&dev->mode_config.fb_kernel_list)) { ++ /* create an fb if we don't have one */ ++ ret = radeonfb_create(dev, fb_width, fb_height, surface_width, surface_height, &radeon_fb); ++ if (ret) ++ return -EINVAL; ++ new_fb = 1; ++ } else { ++ struct drm_framebuffer *fb; ++ fb = list_first_entry(&dev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head); ++ radeon_fb = to_radeon_framebuffer(fb); ++ ++ /* if someone hotplugs something bigger than we have already allocated, we are pwned. ++ As really we can't resize an fbdev that is in the wild currently due to fbdev ++ not really being designed for the lower layers moving stuff around under it. ++ - so in the grand style of things - punt. */ ++ if ((fb->width < surface_width) || (fb->height < surface_height)) { ++ DRM_ERROR("Framebuffer not large enough to scale console onto.\n"); ++ return -EINVAL; ++ } ++ } ++ ++ info = radeon_fb->base.fbdev; ++ par = info->par; ++ ++ crtc_count = 0; ++ /* okay we need to setup new connector sets in the crtcs */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ modeset = &radeon_crtc->mode_set; ++ modeset->fb = &radeon_fb->base; ++ conn_count = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ if (connector->encoder) ++ if(connector->encoder->crtc == modeset->crtc) { ++ modeset->connectors[conn_count] = connector; ++ conn_count++; ++ if (conn_count > RADEONFB_CONN_LIMIT) ++ BUG(); ++ } ++ } ++ ++ for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++) ++ modeset->connectors[i] = NULL; ++ ++ ++ par->crtc_ids[crtc_count++] = crtc->base.id; ++ ++ modeset->num_connectors = conn_count; ++ if (modeset->mode != modeset->crtc->desired_mode) ++ modeset->mode = modeset->crtc->desired_mode; ++ } ++ par->crtc_count = crtc_count; ++ ++ if (new_fb) { ++ info->var.pixclock = -1; ++ if (register_framebuffer(info) < 0) ++ return -EINVAL; ++ } else ++ 
radeonfb_set_par(info); ++ ++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, ++ info->fix.id); ++ ++ /* Switch back to kernel console on panic */ ++ panic_mode = *modeset; ++ atomic_notifier_chain_register(&panic_notifier_list, &paniced); ++ printk(KERN_INFO "registered panic notifier\n"); ++ ++ return 0; ++} ++ ++int radeonfb_probe(struct drm_device *dev) ++{ ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ /* something has changed in the lower levels of hell - deal with it ++ here */ ++ ++ /* two modes : a) 1 fb to rule all crtcs. ++ b) one fb per crtc. ++ two actions 1) new connected device ++ 2) device removed. ++ case a/1 : if the fb surface isn't big enough - resize the surface fb. ++ if the fb size isn't big enough - resize fb into surface. ++ if everything big enough configure the new crtc/etc. ++ case a/2 : undo the configuration ++ possibly resize down the fb to fit the new configuration. ++ case b/1 : see if it is on a new crtc - setup a new fb and add it. ++ case b/2 : teardown the new fb. 
++ */ ++ ++ /* mode a first */ ++ /* search for an fb */ ++ // if (radeon_fbpercrtc == 1) { ++ // ret = radeonfb_multi_fb_probe(dev); ++ // } else { ++ ret = radeonfb_single_fb_probe(dev); ++ // } ++ ++ return ret; ++} ++EXPORT_SYMBOL(radeonfb_probe); ++ ++int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct fb_info *info; ++ struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); ++ ++ if (!fb) ++ return -EINVAL; ++ ++ info = fb->fbdev; ++ ++ if (info) { ++ unregister_framebuffer(info); ++ drm_bo_kunmap(&radeon_fb->kmap_obj); ++ dev_priv->mm.vram_visible += radeon_fb->obj->size; ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(radeon_fb->obj); ++ radeon_fb->obj = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ framebuffer_release(info); ++ } ++ ++ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); ++ memset(&panic_mode, 0, sizeof(struct drm_mode_set)); ++ return 0; ++} ++EXPORT_SYMBOL(radeonfb_remove); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c +new file mode 100644 +index 0000000..13af804 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_fence.c +@@ -0,0 +1,99 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++int radeon_fence_emit_sequence(struct drm_device *dev, uint32_t class, ++ uint32_t flags, uint32_t *sequence, ++ uint32_t *native_type) ++{ ++ struct drm_radeon_private *dev_priv = (struct drm_radeon_private *) dev->dev_private; ++ ++ if (!dev_priv) ++ return -EINVAL; ++ ++ radeon_emit_irq(dev); ++ ++ DRM_DEBUG("emitting %d\n", dev_priv->counter); ++ *sequence = (uint32_t) dev_priv->counter; ++ *native_type = DRM_FENCE_TYPE_EXE; ++ ++ return 0; ++} ++ ++static void radeon_fence_poll(struct drm_device *dev, uint32_t fence_class, ++ uint32_t waiting_types) ++{ ++ struct drm_radeon_private *dev_priv = (struct drm_radeon_private *) dev->dev_private; ++ uint32_t sequence; ++ ++ sequence = RADEON_READ(RADEON_SCRATCH_REG3); ++ /* this used to be READ_BREADCRUMB(dev_priv); but it caused ++ * a race somewhere in the fencing irq ++ */ ++ ++ DRM_DEBUG("polling %d\n", sequence); ++ drm_fence_handler(dev, 0, sequence, ++ DRM_FENCE_TYPE_EXE, 0); ++} ++ ++void radeon_fence_handler(struct drm_device * dev) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[0]; ++ ++ write_lock(&fm->lock); ++ radeon_fence_poll(dev, 0, fc->waiting_types); ++ write_unlock(&fm->lock); ++} ++ ++int radeon_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) ++{ ++ /* ++ * We have an irq that tells us when we have a new breadcrumb. 
++ */ ++ return 1; ++} ++ ++ ++struct drm_fence_driver radeon_fence_driver = { ++ .num_classes = 1, ++ .wrap_diff = (1U << (BREADCRUMB_BITS -1)), ++ .flush_diff = (1U << (BREADCRUMB_BITS - 2)), ++ .sequence_mask = BREADCRUMB_MASK, ++ .emit = radeon_fence_emit_sequence, ++ .has_irq = radeon_fence_has_irq, ++ .poll = radeon_fence_poll, ++}; ++ +diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c +new file mode 100644 +index 0000000..fff027e +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_gem.c +@@ -0,0 +1,1548 @@ ++/* ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Author: Dave Airlie ++ */ ++#include "drmP.h" ++#include "drm.h" ++ ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++static int radeon_gem_ib_init(struct drm_device *dev); ++static int radeon_gem_ib_destroy(struct drm_device *dev); ++ ++int radeon_gem_init_object(struct drm_gem_object *obj) ++{ ++ struct drm_radeon_gem_object *obj_priv; ++ ++ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); ++ if (!obj_priv) { ++ return -ENOMEM; ++ } ++ ++ obj->driver_private = obj_priv; ++ obj_priv->obj = obj; ++ return 0; ++} ++ ++void radeon_gem_free_object(struct drm_gem_object *obj) ++{ ++ ++ struct drm_radeon_gem_object *obj_priv = obj->driver_private; ++ ++ /* tear down the buffer object - gem holds struct mutex */ ++ drm_bo_takedown_vm_locked(obj_priv->bo); ++ drm_bo_usage_deref_locked(&obj_priv->bo); ++ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); ++} ++ ++int radeon_gem_info_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct drm_radeon_gem_info *args = data; ++ ++ args->vram_start = dev_priv->mm.vram_offset; ++ args->vram_size = dev_priv->mm.vram_size; ++ args->vram_visible = dev_priv->mm.vram_visible; ++ ++ args->gart_start = dev_priv->mm.gart_start; ++ args->gart_size = dev_priv->mm.gart_useable; ++ ++ return 0; ++} ++ ++struct drm_gem_object *radeon_gem_object_alloc(struct drm_device *dev, int size, int alignment, ++ int initial_domain, bool discardable) ++{ ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ int ret; ++ uint32_t flags; ++ uint32_t page_align; ++ ++ obj = drm_gem_object_alloc(dev, size); ++ if (!obj) ++ return NULL; ++ ++ obj_priv = obj->driver_private; ++ flags = DRM_BO_FLAG_MAPPABLE; ++ if (initial_domain == RADEON_GEM_DOMAIN_VRAM) ++ flags |= DRM_BO_FLAG_MEM_VRAM; ++ else if (initial_domain == RADEON_GEM_DOMAIN_GTT) ++ flags |= DRM_BO_FLAG_MEM_TT; ++ else ++ flags |= DRM_BO_FLAG_MEM_LOCAL | 
DRM_BO_FLAG_CACHED; ++ ++ flags |= DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE; ++ ++ if (discardable) ++ flags |= DRM_BO_FLAG_DISCARDABLE; ++ ++ if (alignment == 0) ++ alignment = PAGE_SIZE; ++ ++ page_align = alignment >> PAGE_SHIFT; ++ /* create a TTM BO */ ++ ret = drm_buffer_object_create(dev, ++ size, drm_bo_type_device, ++ flags, 0, page_align, ++ 0, &obj_priv->bo); ++ if (ret) ++ goto fail; ++ ++ DRM_DEBUG("%p : size 0x%x, alignment %d, initial_domain %d\n", obj_priv->bo, size, alignment, initial_domain); ++ return obj; ++fail: ++ ++ return NULL; ++} ++ ++int radeon_gem_create_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_radeon_gem_create *args = data; ++ struct drm_radeon_gem_object *obj_priv; ++ struct drm_gem_object *obj; ++ int ret = 0; ++ int handle; ++ ++ /* create a gem object to contain this object in */ ++ args->size = roundup(args->size, PAGE_SIZE); ++ ++ obj = radeon_gem_object_alloc(dev, args->size, args->alignment, args->initial_domain, args->no_backing_store); ++ if (!obj) ++ return -EINVAL; ++ ++ obj_priv = obj->driver_private; ++ DRM_DEBUG("obj is %p bo is %p, %d\n", obj, obj_priv->bo, obj_priv->bo->num_pages); ++ ret = drm_gem_handle_create(file_priv, obj, &handle); ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_handle_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (ret) ++ goto fail; ++ ++ args->handle = handle; ++ ++ return 0; ++fail: ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++ ++int radeon_gem_set_domain(struct drm_gem_object *obj, uint32_t read_domains, uint32_t write_domain, uint32_t *flags_p, bool unfenced) ++{ ++ struct drm_radeon_gem_object *obj_priv; ++ uint32_t flags = 0; ++ int ret; ++ ++ obj_priv = obj->driver_private; ++ ++ /* work out where to validate the buffer to */ ++ if (write_domain) { /* write domains always win */ ++ if (write_domain == 
RADEON_GEM_DOMAIN_VRAM) ++ flags = DRM_BO_FLAG_MEM_VRAM; ++ else if (write_domain == RADEON_GEM_DOMAIN_GTT) ++ flags = DRM_BO_FLAG_MEM_TT; // need a can write gart check ++ else ++ return -EINVAL; // we can't write to system RAM ++ } else { ++ /* okay for a read domain - prefer wherever the object is now or close enough */ ++ if (read_domains == 0) ++ return -EINVAL; ++ ++ /* if its already a local memory and CPU is valid do nothing */ ++ if (read_domains & RADEON_GEM_DOMAIN_CPU) { ++ if (obj_priv->bo->mem.mem_type == DRM_BO_MEM_LOCAL) ++ return 0; ++ if (read_domains == RADEON_GEM_DOMAIN_CPU) ++ return -EINVAL; ++ } ++ ++ /* simple case no choice in domains */ ++ if (read_domains == RADEON_GEM_DOMAIN_VRAM) ++ flags = DRM_BO_FLAG_MEM_VRAM; ++ else if (read_domains == RADEON_GEM_DOMAIN_GTT) ++ flags = DRM_BO_FLAG_MEM_TT; ++ else if ((obj_priv->bo->mem.mem_type == DRM_BO_MEM_VRAM) && (read_domains & RADEON_GEM_DOMAIN_VRAM)) ++ flags = DRM_BO_FLAG_MEM_VRAM; ++ else if ((obj_priv->bo->mem.mem_type == DRM_BO_MEM_TT) && (read_domains & RADEON_GEM_DOMAIN_GTT)) ++ flags = DRM_BO_FLAG_MEM_TT; ++ else if ((obj_priv->bo->mem.mem_type == DRM_BO_MEM_LOCAL) && (read_domains & RADEON_GEM_DOMAIN_GTT)) ++ flags = DRM_BO_FLAG_MEM_TT; ++ ++ /* no idea here just set whatever we are input */ ++ if (flags == 0) { ++ if (read_domains & RADEON_GEM_DOMAIN_VRAM) ++ flags |= DRM_BO_FLAG_MEM_VRAM; ++ if (read_domains & RADEON_GEM_DOMAIN_GTT) ++ flags |= DRM_BO_FLAG_MEM_TT; ++ } ++ } ++ ++ /* if this BO is pinned then we ain't moving it anywhere */ ++ if (obj_priv->bo->pinned_mem_type && unfenced) ++ return 0; ++ ++ DRM_DEBUG("validating %p from %d into %x %d %d\n", obj_priv->bo, obj_priv->bo->mem.mem_type, flags, read_domains, write_domain); ++ ret = drm_bo_do_validate(obj_priv->bo, flags, DRM_BO_MASK_MEM | DRM_BO_FLAG_CACHED, ++ unfenced ? 
DRM_BO_HINT_DONT_FENCE : 0, 0); ++ if (ret) ++ return ret; ++ ++ if (flags_p) ++ *flags_p = flags; ++ return 0; ++ ++} ++ ++int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ /* transition the BO to a domain - just validate the BO into a certain domain */ ++ struct drm_radeon_gem_set_domain *args = data; ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ int ret; ++ ++ /* for now if someone requests domain CPU - just make sure the buffer is finished with */ ++ ++ /* just do a BO wait for now */ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ ++ obj_priv = obj->driver_private; ++ ++ ret = radeon_gem_set_domain(obj, args->read_domains, args->write_domain, NULL, true); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return -ENOSYS; ++} ++ ++int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_radeon_gem_pwrite *args = data; ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ int ret; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ ++ obj_priv = obj->driver_private; ++ ++ /* check where the buffer is first - if not in VRAM ++ fallback to userspace copying for now */ ++ mutex_lock(&obj_priv->bo->mutex); ++ if (obj_priv->bo->mem.mem_type != DRM_BO_MEM_VRAM) { ++ ret = -EINVAL; ++ goto out_unlock; ++ } ++ ++ DRM_ERROR("pwriting data->size %lld %llx\n", args->size, args->offset); ++ ret = -EINVAL; ++ ++#if 0 ++ /* so need to grab an IB, copy the data into it in a loop ++ and send them to VRAM using HDB */ ++ while ((buf = radeon_host_data_blit(dev, cpp, w, dst_pitch_off, &buf_pitch, ++ x, &y, (unsigned int*)&h, 
&hpass)) != 0) { ++ radeon_host_data_blit_copy_pass(dev, cpp, buf, (uint8_t *)src, ++ hpass, buf_pitch, src_pitch); ++ src += hpass * src_pitch; ++ } ++#endif ++out_unlock: ++ mutex_unlock(&obj_priv->bo->mutex); ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_radeon_gem_mmap *args = data; ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ loff_t offset; ++ unsigned long addr; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ ++ offset = args->offset; ++ ++ DRM_DEBUG("got here %p\n", obj); ++ obj_priv = obj->driver_private; ++ ++ DRM_DEBUG("got here %p %p %lld %ld\n", obj, obj_priv->bo, args->size, obj_priv->bo->num_pages); ++ if (!obj_priv->bo) { ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ down_write(¤t->mm->mmap_sem); ++ addr = do_mmap_pgoff(file_priv->filp, 0, args->size, ++ PROT_READ | PROT_WRITE, MAP_SHARED, ++ obj_priv->bo->map_list.hash.key); ++ up_write(¤t->mm->mmap_sem); ++ ++ DRM_DEBUG("got here %p %d\n", obj, obj_priv->bo->mem.mem_type); ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ if (IS_ERR((void *)addr)) ++ return addr; ++ ++ args->addr_ptr = (uint64_t) addr; ++ ++ return 0; ++ ++} ++ ++int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_radeon_gem_pin *args = data; ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ int ret; ++ int flags = DRM_BO_FLAG_NO_EVICT; ++ int mask = DRM_BO_FLAG_NO_EVICT; ++ ++ /* check for valid args */ ++ if (args->pin_domain) { ++ mask |= DRM_BO_MASK_MEM; ++ if (args->pin_domain == RADEON_GEM_DOMAIN_GTT) ++ flags 
|= DRM_BO_FLAG_MEM_TT; ++ else if (args->pin_domain == RADEON_GEM_DOMAIN_VRAM) ++ flags |= DRM_BO_FLAG_MEM_VRAM; ++ else /* hand back the offset we currently have if no args supplied ++ - this is to allow old mesa to work - its a hack */ ++ flags = 0; ++ } ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ ++ obj_priv = obj->driver_private; ++ ++ /* validate into a pin with no fence */ ++ DRM_DEBUG("got here %p %p %d\n", obj, obj_priv->bo, atomic_read(&obj_priv->bo->usage)); ++ if (flags && !(obj_priv->bo->type != drm_bo_type_kernel && !DRM_SUSER(DRM_CURPROC))) { ++ ret = drm_bo_do_validate(obj_priv->bo, flags, mask, ++ DRM_BO_HINT_DONT_FENCE, 0); ++ } else ++ ret = 0; ++ ++ args->offset = obj_priv->bo->offset; ++ DRM_DEBUG("got here %p %p %x\n", obj, obj_priv->bo, obj_priv->bo->offset); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_radeon_gem_unpin *args = data; ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ int ret; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ ++ obj_priv = obj->driver_private; ++ ++ /* validate into a pin with no fence */ ++ ++ ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT, ++ DRM_BO_HINT_DONT_FENCE, 0); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++int radeon_gem_busy(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return 0; ++} ++ ++int radeon_gem_wait_rendering(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_radeon_gem_wait_rendering *args = data; ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ int ret; ++ 
++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ ++ obj_priv = obj->driver_private; ++ ++ mutex_lock(&obj_priv->bo->mutex); ++ ret = drm_bo_wait(obj_priv->bo, 0, 1, 1, 0); ++ mutex_unlock(&obj_priv->bo->mutex); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++ ++ ++/* ++ * Depending on card genertation, chipset bugs, etc... the amount of vram ++ * accessible to the CPU can vary. This function is our best shot at figuring ++ * it out. Returns a value in KB. ++ */ ++static uint32_t radeon_get_accessible_vram(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t aper_size; ++ u8 byte; ++ ++ if (dev_priv->chip_family >= CHIP_R600) ++ aper_size = RADEON_READ(R600_CONFIG_APER_SIZE) / 1024; ++ else ++ aper_size = RADEON_READ(RADEON_CONFIG_APER_SIZE) / 1024; ++ ++ /* Set HDP_APER_CNTL only on cards that are known not to be broken, ++ * that is has the 2nd generation multifunction PCI interface ++ */ ++ if (dev_priv->chip_family == CHIP_RV280 || ++ dev_priv->chip_family == CHIP_RV350 || ++ dev_priv->chip_family == CHIP_RV380 || ++ dev_priv->chip_family == CHIP_R420 || ++ dev_priv->chip_family == CHIP_R423 || ++ dev_priv->chip_family == CHIP_RV410 || ++ radeon_is_avivo(dev_priv)) { ++ uint32_t temp = RADEON_READ(RADEON_HOST_PATH_CNTL); ++ temp |= RADEON_HDP_APER_CNTL; ++ RADEON_WRITE(RADEON_HOST_PATH_CNTL, temp); ++ return aper_size * 2; ++ } ++ ++ /* Older cards have all sorts of funny issues to deal with. First ++ * check if it's a multifunction card by reading the PCI config ++ * header type... Limit those to one aperture size ++ */ ++ pci_read_config_byte(dev->pdev, 0xe, &byte); ++ if (byte & 0x80) ++ return aper_size; ++ ++ /* Single function older card. We read HDP_APER_CNTL to see how the BIOS ++ * have set it up. 
We don't write this as it's broken on some ASICs but ++ * we expect the BIOS to have done the right thing (might be too optimistic...) ++ */ ++ if (RADEON_READ(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) ++ return aper_size * 2; ++ ++ return aper_size; ++} ++ ++/* code from the DDX - do memory sizing */ ++void radeon_vram_setup(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t vram; ++ uint32_t accessible, bar_size; ++ ++ if (!radeon_is_avivo(dev_priv) && (dev_priv->flags & RADEON_IS_IGP)) { ++ uint32_t tom = RADEON_READ(RADEON_NB_TOM); ++ ++ vram = (((tom >> 16) - (tom & 0xffff) + 1) << 6); ++ RADEON_WRITE(RADEON_CONFIG_MEMSIZE, vram * 1024); ++ } else { ++ if (dev_priv->chip_family >= CHIP_R600) ++ vram = RADEON_READ(R600_CONFIG_MEMSIZE) / 1024; ++ else { ++ vram = RADEON_READ(RADEON_CONFIG_MEMSIZE) / 1024; ++ ++ /* Some production boards of m6 will return 0 if it's 8 MB */ ++ if (vram == 0) { ++ vram = 8192; ++ RADEON_WRITE(RADEON_CONFIG_MEMSIZE, 0x800000); ++ } ++ } ++ } ++ ++ accessible = radeon_get_accessible_vram(dev); ++ ++ bar_size = drm_get_resource_len(dev, 0) / 1024; ++ if (bar_size == 0) ++ bar_size = 0x20000; ++ if (accessible > bar_size) ++ accessible = bar_size; ++ ++ if (accessible > vram) ++ accessible = vram; ++ ++ DRM_INFO("Detected VRAM RAM=%dK, accessible=%uK, BAR=%uK\n", ++ vram, accessible, bar_size); ++ ++ dev_priv->mm.vram_offset = dev_priv->fb_aper_offset; ++ dev_priv->mm.vram_size = vram * 1024; ++ dev_priv->mm.vram_visible = accessible * 1024; ++ ++ ++} ++ ++static int radeon_gart_init(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int ret; ++ u32 base = 0; ++ ++ /* setup a 32MB GART */ ++ dev_priv->gart_size = dev_priv->mm.gart_size; ++ ++ /* work out table size from GART size - do the math for show ++ * table is one dword per 4k page. 
++ */ ++ dev_priv->gart_info.table_size = (dev_priv->gart_size / 4096) * sizeof(uint32_t); ++ ++#if __OS_HAS_AGP ++ /* setup VRAM vs GART here */ ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ base = dev->agp->base; ++ if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location && ++ base < (dev_priv->fb_location + dev_priv->fb_size - 1)) { ++ DRM_INFO("Can't use agp base @0x%08lx, won't fit\n", ++ dev->agp->base); ++ base = 0; ++ } ++ } ++#endif ++ ++ if (base == 0) { ++ base = dev_priv->fb_location + dev_priv->fb_size; ++ if (base < dev_priv->fb_location || ++ ((base + dev_priv->gart_size) & 0xfffffffful) < base) ++ base = dev_priv->fb_location ++ - dev_priv->gart_size; ++ } ++ /* start on the card */ ++ dev_priv->gart_vm_start = base & 0xffc00000u; ++ if (dev_priv->gart_vm_start != base) ++ DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", ++ base, dev_priv->gart_vm_start); ++ ++ /* if on PCIE we need to allocate an fb object for the PCIE GART table */ ++ if (dev_priv->flags & RADEON_IS_PCIE) { ++ ret = drm_buffer_object_create(dev, dev_priv->gart_info.table_size, ++ drm_bo_type_kernel, ++ DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT, ++ 0, 1, 0, &dev_priv->mm.pcie_table.bo); ++ if (ret) ++ return -EINVAL; ++ ++ /* subtract from VRAM value reporting to userspace */ ++ dev_priv->mm.vram_visible -= dev_priv->gart_info.table_size; ++ ++ dev_priv->mm.pcie_table_backup = kzalloc(dev_priv->gart_info.table_size, GFP_KERNEL); ++ if (!dev_priv->mm.pcie_table_backup) ++ return -EINVAL; ++ ++ ret = drm_bo_kmap(dev_priv->mm.pcie_table.bo, 0, dev_priv->gart_info.table_size >> PAGE_SHIFT, ++ &dev_priv->mm.pcie_table.kmap); ++ if (ret) ++ return -EINVAL; ++ ++ dev_priv->pcigart_offset_set = 2; ++ dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table.bo->offset; ++ dev_priv->gart_info.addr = dev_priv->mm.pcie_table.kmap.virtual; ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; ++ 
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB; ++ memset(dev_priv->gart_info.addr, 0, dev_priv->gart_info.table_size); ++ } else if (!(dev_priv->flags & RADEON_IS_AGP)) { ++ /* allocate PCI GART table */ ++ dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); ++ dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN; ++ if (dev_priv->flags & RADEON_IS_IGPGART) ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP; ++ else ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; ++ ++ ret = drm_ati_alloc_pcigart_table(dev, &dev_priv->gart_info); ++ if (ret) { ++ DRM_ERROR("cannot allocate PCI GART page!\n"); ++ return -EINVAL; ++ } ++ ++ dev_priv->gart_info.addr = dev_priv->gart_info.table_handle->vaddr; ++ dev_priv->gart_info.bus_addr = dev_priv->gart_info.table_handle->busaddr; ++ } ++ ++ /* gart values setup - start the GART */ ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ radeon_set_pcigart(dev_priv, 0); ++ /* enable AGP GART bits */ ++ ++ DRM_INFO("setting agp_base to %x\n", dev->agp->base); ++ radeon_write_agp_base(dev_priv, dev->agp->base); ++ ++ DRM_INFO("setting agp_location to %x\n", dev_priv->gart_vm_start); ++ radeon_write_agp_location(dev_priv, ++ (((dev_priv->gart_vm_start - 1 + ++ dev_priv->gart_size) & 0xffff0000) | ++ (dev_priv->gart_vm_start >> 16)), 0); ++ ++ } else { ++ radeon_set_pcigart(dev_priv, 1); ++ } ++ ++ return 0; ++} ++ ++int radeon_alloc_gart_objects(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int ret; ++ ++ ret = drm_buffer_object_create(dev, RADEON_DEFAULT_RING_SIZE, ++ drm_bo_type_kernel, ++ DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT | ++ DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT, ++ 0, 1, 0, &dev_priv->mm.ring.bo); ++ if (ret) { ++ if (dev_priv->flags & RADEON_IS_AGP) ++ DRM_ERROR("failed to allocate ring - most likely an AGP driver bug\n"); ++ else ++ DRM_ERROR("failed to allocate ring\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_kmap(dev_priv->mm.ring.bo, 0, 
RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT, ++ &dev_priv->mm.ring.kmap); ++ if (ret) { ++ DRM_ERROR("failed to map ring\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_buffer_object_create(dev, PAGE_SIZE, ++ drm_bo_type_kernel, ++ DRM_BO_FLAG_WRITE |DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT | ++ DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT, ++ 0, 1, 0, &dev_priv->mm.ring_read.bo); ++ if (ret) { ++ DRM_ERROR("failed to allocate ring read\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_kmap(dev_priv->mm.ring_read.bo, 0, ++ PAGE_SIZE >> PAGE_SHIFT, ++ &dev_priv->mm.ring_read.kmap); ++ if (ret) { ++ DRM_ERROR("failed to map ring read\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("Ring ptr %p mapped at %ld %p, read ptr %p maped at %ld %p\n", ++ dev_priv->mm.ring.bo, dev_priv->mm.ring.bo->offset, dev_priv->mm.ring.kmap.virtual, ++ dev_priv->mm.ring_read.bo, dev_priv->mm.ring_read.bo->offset, dev_priv->mm.ring_read.kmap.virtual); ++ ++ dev_priv->mm.gart_useable -= RADEON_DEFAULT_RING_SIZE + PAGE_SIZE; ++ ++ /* init the indirect buffers */ ++ radeon_gem_ib_init(dev); ++ return 0; ++ ++} ++ ++static bool avivo_get_mc_idle(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ /* no idea where this is on r600 yet */ ++ return true; ++ } else if (dev_priv->chip_family == CHIP_RV515) { ++ if (radeon_read_mc_reg(dev_priv, RV515_MC_STATUS) & RV515_MC_STATUS_IDLE) ++ return true; ++ else ++ return false; ++ } else if (dev_priv->chip_family == CHIP_RS600) { ++ if (radeon_read_mc_reg(dev_priv, RS600_MC_STATUS) & RS600_MC_STATUS_IDLE) ++ return true; ++ else ++ return false; ++ } else if ((dev_priv->chip_family == CHIP_RS690) || ++ (dev_priv->chip_family == CHIP_RS740)) { ++ if (radeon_read_mc_reg(dev_priv, RS690_MC_STATUS) & RS690_MC_STATUS_IDLE) ++ return true; ++ else ++ return false; ++ } else { ++ if (radeon_read_mc_reg(dev_priv, R520_MC_STATUS) & R520_MC_STATUS_IDLE) ++ return true; ++ else ++ return false; ++ } 
++} ++ ++ ++static void avivo_disable_mc_clients(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ int timeout; ++ ++ radeon_do_wait_for_idle(dev_priv); ++ ++ RADEON_WRITE(AVIVO_D1VGA_CONTROL, RADEON_READ(AVIVO_D1VGA_CONTROL) & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); ++ RADEON_WRITE(AVIVO_D2VGA_CONTROL, RADEON_READ(AVIVO_D2VGA_CONTROL) & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); ++ ++ tmp = RADEON_READ(AVIVO_D1CRTC_CONTROL); ++ RADEON_WRITE(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); ++ ++ tmp = RADEON_READ(AVIVO_D2CRTC_CONTROL); ++ RADEON_WRITE(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); ++ ++ tmp = RADEON_READ(AVIVO_D2CRTC_CONTROL); ++ ++ udelay(1000); ++ ++ timeout = 0; ++ while (!(avivo_get_mc_idle(dev))) { ++ if (++timeout > 100000) { ++ DRM_ERROR("Timeout waiting for memory controller to update settings\n"); ++ DRM_ERROR("Bad things may or may not happen\n"); ++ } ++ udelay(10); ++ } ++} ++ ++static inline u32 radeon_busy_wait(struct drm_device *dev, uint32_t reg, uint32_t bits, ++ unsigned int timeout) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ u32 status; ++ ++ do { ++ udelay(10); ++ status = RADEON_READ(reg); ++ timeout--; ++ } while(status != 0xffffffff && (status & bits) && (timeout > 0)); ++ ++ if (timeout == 0) ++ status = 0xffffffff; ++ ++ return status; ++} ++ ++/* Wait for vertical sync on primary CRTC */ ++static void radeon_wait_for_vsync(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t crtc_gen_cntl; ++ ++ crtc_gen_cntl = RADEON_READ(RADEON_CRTC_GEN_CNTL); ++ if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) || ++ !(crtc_gen_cntl & RADEON_CRTC_EN)) ++ return; ++ ++ /* Clear the CRTC_VBLANK_SAVE bit */ ++ RADEON_WRITE(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR); ++ ++ radeon_busy_wait(dev, RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE, 2000); ++ ++} ++ ++/* Wait for vertical sync on primary CRTC */ ++static void radeon_wait_for_vsync2(struct 
drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t crtc2_gen_cntl; ++ ++ crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL); ++ if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) || ++ !(crtc2_gen_cntl & RADEON_CRTC2_EN)) ++ return; ++ ++ /* Clear the CRTC_VBLANK_SAVE bit */ ++ RADEON_WRITE(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR); ++ ++ radeon_busy_wait(dev, RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE, 2000); ++} ++ ++static void legacy_disable_mc_clients(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t old_mc_status, status_idle; ++ uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl; ++ uint32_t status; ++ ++ radeon_do_wait_for_idle(dev_priv); ++ ++ if (dev_priv->flags & RADEON_IS_IGP) ++ return; ++ ++ old_mc_status = RADEON_READ(RADEON_MC_STATUS); ++ ++ /* stop display and memory access */ ++ ov0_scale_cntl = RADEON_READ(RADEON_OV0_SCALE_CNTL); ++ RADEON_WRITE(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE); ++ crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL); ++ RADEON_WRITE(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS); ++ crtc_gen_cntl = RADEON_READ(RADEON_CRTC_GEN_CNTL); ++ ++ radeon_wait_for_vsync(dev); ++ ++ RADEON_WRITE(RADEON_CRTC_GEN_CNTL, ++ (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) | ++ RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN); ++ ++ if (!(dev_priv->flags & RADEON_SINGLE_CRTC)) { ++ crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL); ++ ++ radeon_wait_for_vsync2(dev); ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, ++ (crtc2_gen_cntl & ++ ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) | ++ RADEON_CRTC2_DISP_REQ_EN_B); ++ } ++ ++ udelay(500); ++ ++ if (radeon_is_r300(dev_priv)) ++ status_idle = R300_MC_IDLE; ++ else ++ status_idle = RADEON_MC_IDLE; ++ ++ status = radeon_busy_wait(dev, RADEON_MC_STATUS, status_idle, 200000); ++ if (status == 0xffffffff) { ++ DRM_ERROR("Timeout waiting 
for memory controller to update settings\n"); ++ DRM_ERROR("Bad things may or may not happen\n"); ++ } ++} ++ ++ ++void radeon_init_memory_map(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ u32 mem_size, aper_size; ++ ++ dev_priv->mc_fb_location = radeon_read_fb_location(dev_priv); ++ radeon_read_agp_location(dev_priv, &dev_priv->mc_agp_loc_lo, &dev_priv->mc_agp_loc_hi); ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ mem_size = RADEON_READ(R600_CONFIG_MEMSIZE); ++ aper_size = RADEON_READ(R600_CONFIG_APER_SIZE); ++ } else { ++ mem_size = RADEON_READ(RADEON_CONFIG_MEMSIZE); ++ aper_size = RADEON_READ(RADEON_CONFIG_APER_SIZE); ++ } ++ ++ /* M6s report illegal memory size */ ++ if (mem_size == 0) ++ mem_size = 8 * 1024 * 1024; ++ ++ /* for RN50/M6/M7 - Novell bug 204882 */ ++ if (aper_size > mem_size) ++ mem_size = aper_size; ++ ++ if ((dev_priv->chip_family != CHIP_RS600) && ++ (dev_priv->chip_family != CHIP_RS690) && ++ (dev_priv->chip_family != CHIP_RS740)) { ++ if (dev_priv->flags & RADEON_IS_IGP) ++ dev_priv->mc_fb_location = RADEON_READ(RADEON_NB_TOM); ++ else { ++ uint32_t aper0_base; ++ ++ if (dev_priv->chip_family >= CHIP_R600) ++ aper0_base = RADEON_READ(R600_CONFIG_F0_BASE); ++ else ++ aper0_base = RADEON_READ(RADEON_CONFIG_APER_0_BASE); ++ ++ ++ /* Some chips have an "issue" with the memory controller, the ++ * location must be aligned to the size. We just align it down, ++ * too bad if we walk over the top of system memory, we don't ++ * use DMA without a remapped anyway. 
++ * Affected chips are rv280, all r3xx, and all r4xx, but not IGP ++ */ ++ if (dev_priv->chip_family == CHIP_RV280 || ++ dev_priv->chip_family == CHIP_R300 || ++ dev_priv->chip_family == CHIP_R350 || ++ dev_priv->chip_family == CHIP_RV350 || ++ dev_priv->chip_family == CHIP_RV380 || ++ dev_priv->chip_family == CHIP_R420 || ++ dev_priv->chip_family == CHIP_R423 || ++ dev_priv->chip_family == CHIP_RV410) ++ aper0_base &= ~(mem_size - 1); ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ dev_priv->mc_fb_location = (aper0_base >> 24) | ++ (((aper0_base + mem_size - 1) & 0xff000000U) >> 8); ++ } else { ++ dev_priv->mc_fb_location = (aper0_base >> 16) | ++ ((aper0_base + mem_size - 1) & 0xffff0000U); ++ } ++ } ++ } ++ ++ if (dev_priv->chip_family >= CHIP_R600) ++ dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 24; ++ else ++ dev_priv->fb_location = (dev_priv->mc_fb_location & 0xffff) << 16; ++ ++ /* updating mc regs here */ ++ if (radeon_is_avivo(dev_priv)) ++ avivo_disable_mc_clients(dev); ++ else ++ legacy_disable_mc_clients(dev); ++ ++ radeon_write_fb_location(dev_priv, dev_priv->mc_fb_location); ++ ++ if (radeon_is_avivo(dev_priv)) { ++ if (dev_priv->chip_family >= CHIP_R600) ++ RADEON_WRITE(R600_HDP_NONSURFACE_BASE, (dev_priv->mc_fb_location << 16) & 0xff0000); ++ else ++ RADEON_WRITE(AVIVO_HDP_FB_LOCATION, dev_priv->mc_fb_location); ++ } ++ ++ if (dev_priv->chip_family >= CHIP_R600) { ++ dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffffff) << 24; ++ dev_priv->fb_size = ((radeon_read_fb_location(dev_priv) & 0xff000000u) + 0x1000000) ++ - dev_priv->fb_location; ++ } else { ++ dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; ++ dev_priv->fb_size = ++ ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000) ++ - dev_priv->fb_location; ++ } ++ ++ /* add an MTRR for the VRAM */ ++ dev_priv->aper_size = aper_size; ++ dev_priv->vram_mtrr = mtrr_add(dev_priv->fb_aper_offset, dev_priv->aper_size, 
MTRR_TYPE_WRCOMB, 1); ++ ++} ++ ++#if __OS_HAS_AGP ++int radeon_modeset_agp_init(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_agp_mode mode; ++ struct drm_agp_info info; ++ int ret; ++ int default_mode; ++ uint32_t agp_status; ++ bool is_v3; ++ ++ /* Acquire AGP. */ ++ ret = drm_agp_acquire(dev); ++ if (ret) { ++ DRM_ERROR("Unable to acquire AGP: %d\n", ret); ++ return ret; ++ } ++ ++ ret = drm_agp_info(dev, &info); ++ if (ret) { ++ DRM_ERROR("Unable to get AGP info: %d\n", ret); ++ return ret; ++ } ++ ++ mode.mode = info.mode; ++ ++ agp_status = (RADEON_READ(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; ++ is_v3 = !!(agp_status & RADEON_AGPv3_MODE); ++ ++ if (is_v3) { ++ default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4; ++ } else { ++ if (agp_status & RADEON_AGP_4X_MODE) default_mode = 4; ++ else if (agp_status & RADEON_AGP_2X_MODE) default_mode = 2; ++ else default_mode = 1; ++ } ++ ++ if (radeon_agpmode > 0) { ++ if ((radeon_agpmode < (is_v3 ? 4 : 1)) || ++ (radeon_agpmode > (is_v3 ? 8 : 4)) || ++ (radeon_agpmode & (radeon_agpmode - 1))) { ++ DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n", ++ radeon_agpmode, is_v3 ? 
"4, 8" : "1, 2, 4", ++ default_mode); ++ radeon_agpmode = default_mode; ++ } ++ else ++ DRM_INFO("AGP mode requested: %d\n", radeon_agpmode); ++ } else ++ radeon_agpmode = default_mode; ++ ++ mode.mode &= ~RADEON_AGP_MODE_MASK; ++ if (is_v3) { ++ switch(radeon_agpmode) { ++ case 8: ++ mode.mode |= RADEON_AGPv3_8X_MODE; ++ break; ++ case 4: ++ default: ++ mode.mode |= RADEON_AGPv3_4X_MODE; ++ break; ++ } ++ } else { ++ switch(radeon_agpmode) { ++ case 4: mode.mode |= RADEON_AGP_4X_MODE; ++ case 2: mode.mode |= RADEON_AGP_2X_MODE; ++ case 1: ++ default: ++ mode.mode |= RADEON_AGP_1X_MODE; ++ break; ++ } ++ } ++ ++ mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */ ++ ++ ret = drm_agp_enable(dev, mode); ++ if (ret) { ++ DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); ++ return ret; ++ } ++ ++ /* workaround some hw issues */ ++ if (dev_priv->chip_family < CHIP_R200) { ++ RADEON_WRITE(RADEON_AGP_CNTL, RADEON_READ(RADEON_AGP_CNTL) | 0x000e0000); ++ } ++ return 0; ++} ++ ++void radeon_modeset_agp_destroy(struct drm_device *dev) ++{ ++ if (dev->agp->acquired) ++ drm_agp_release(dev); ++} ++#endif ++ ++/* init memory manager - start with all of VRAM and a 32MB GART aperture for now */ ++int radeon_gem_mm_init(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int ret; ++ u32 pg_offset; ++ ++ /* init TTM underneath */ ++ drm_bo_driver_init(dev); ++ ++ /* use the uncached allocator */ ++ dev->bm.allocator_type = _DRM_BM_ALLOCATOR_UNCACHED; ++ ++ /* size the mappable VRAM memory for now */ ++ radeon_vram_setup(dev); ++ ++ radeon_init_memory_map(dev); ++ ++#define VRAM_RESERVE_TEXT (256*1024) /* need to reserve 256 for text mode for now */ ++ dev_priv->mm.vram_visible -= VRAM_RESERVE_TEXT; ++ pg_offset = VRAM_RESERVE_TEXT >> PAGE_SHIFT; ++ drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, pg_offset, /*dev_priv->mm.vram_offset >> PAGE_SHIFT,*/ ++ ((dev_priv->mm.vram_visible) >> PAGE_SHIFT) - 16, ++ 0); ++ ++ /* need AGP to work out sizes */ 
++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ radeon_modeset_agp_init(dev); ++ ++ if (dev->agp->agp_info.aper_size < radeon_gart_size) ++ radeon_gart_size = dev->agp->agp_info.aper_size; ++ } ++#endif ++ ++ if (dev_priv->chip_family > CHIP_R600) { ++ dev_priv->mm_enabled = true; ++ return 0; ++ } ++ ++ dev_priv->mm.gart_size = (radeon_gart_size * 1024 * 1024); ++ dev_priv->mm.gart_start = 0; ++ dev_priv->mm.gart_useable = dev_priv->mm.gart_size; ++ ret = radeon_gart_init(dev); ++ if (ret) ++ return -EINVAL; ++ ++ drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, ++ dev_priv->mm.gart_size >> PAGE_SHIFT, ++ 0); ++ ++ /* need to allocate some objects in the GART */ ++ /* ring + ring read ptr */ ++ ret = radeon_alloc_gart_objects(dev); ++ if (ret) { ++ radeon_gem_mm_fini(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->mm_enabled = true; ++ return 0; ++} ++ ++void radeon_gem_mm_fini(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ radeon_gem_ib_destroy(dev); ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (dev_priv->mm.ring_read.bo) { ++ drm_bo_kunmap(&dev_priv->mm.ring_read.kmap); ++ drm_bo_usage_deref_locked(&dev_priv->mm.ring_read.bo); ++ } ++ ++ if (dev_priv->mm.ring.bo) { ++ drm_bo_kunmap(&dev_priv->mm.ring.kmap); ++ drm_bo_usage_deref_locked(&dev_priv->mm.ring.bo); ++ } ++ ++ if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) { ++ DRM_DEBUG("delaying takedown of TTM memory\n"); ++ } ++ ++ if (dev_priv->flags & RADEON_IS_PCIE) { ++ if (dev_priv->mm.pcie_table_backup) { ++ kfree(dev_priv->mm.pcie_table_backup); ++ dev_priv->mm.pcie_table_backup = NULL; ++ } ++ if (dev_priv->mm.pcie_table.bo) { ++ drm_bo_kunmap(&dev_priv->mm.pcie_table.kmap); ++ drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table.bo); ++ } ++ } ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) ++ radeon_modeset_agp_destroy(dev); ++#endif ++ ++ if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) { ++ DRM_DEBUG("delaying takedown of VRAM memory\n"); ++ } ++ ++ if 
(dev_priv->vram_mtrr) ++ mtrr_del(dev_priv->vram_mtrr, dev_priv->fb_aper_offset, dev_priv->aper_size); ++ mutex_unlock(&dev->struct_mutex); ++ ++ drm_bo_driver_finish(dev); ++ dev_priv->mm_enabled = false; ++} ++ ++int radeon_gem_object_pin(struct drm_gem_object *obj, ++ uint32_t alignment, uint32_t pin_domain) ++{ ++ struct drm_radeon_gem_object *obj_priv; ++ int ret; ++ uint32_t flags = DRM_BO_FLAG_NO_EVICT; ++ uint32_t mask = DRM_BO_FLAG_NO_EVICT; ++ ++ obj_priv = obj->driver_private; ++ ++ if (pin_domain) { ++ mask |= DRM_BO_MASK_MEM; ++ if (pin_domain == RADEON_GEM_DOMAIN_GTT) ++ flags |= DRM_BO_FLAG_MEM_TT; ++ else if (pin_domain == RADEON_GEM_DOMAIN_VRAM) ++ flags |= DRM_BO_FLAG_MEM_VRAM; ++ else ++ return -EINVAL; ++ } ++ ret = drm_bo_do_validate(obj_priv->bo, flags, mask, ++ DRM_BO_HINT_DONT_FENCE, 0); ++ ++ return ret; ++} ++ ++int radeon_gem_object_unpin(struct drm_gem_object *obj) ++{ ++ struct drm_radeon_gem_object *obj_priv; ++ int ret; ++ ++ obj_priv = obj->driver_private; ++ ++ ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT, ++ DRM_BO_HINT_DONT_FENCE, 0); ++ ++ return ret; ++} ++ ++#define RADEON_IB_MEMORY (1*1024*1024) ++#define RADEON_IB_SIZE (65536) ++ ++#define RADEON_NUM_IB (RADEON_IB_MEMORY / RADEON_IB_SIZE) ++ ++int radeon_gem_ib_get(struct drm_radeon_cs_parser *parser) ++{ ++ int i, index = -1; ++ int ret; ++ drm_radeon_private_t *dev_priv = parser->dev->dev_private; ++ ++ for (i = 0; i < RADEON_NUM_IB; i++) { ++ if (!(dev_priv->ib_alloc_bitmap & (1 << i))){ ++ index = i; ++ break; ++ } ++ } ++ ++ /* if all in use we need to wait */ ++ if (index == -1) { ++ for (i = 0; i < RADEON_NUM_IB; i++) { ++ if (dev_priv->ib_alloc_bitmap & (1 << i)) { ++ mutex_lock(&dev_priv->ib_objs[i]->bo->mutex); ++ ret = drm_bo_wait(dev_priv->ib_objs[i]->bo, 0, 1, 0, 0); ++ mutex_unlock(&dev_priv->ib_objs[i]->bo->mutex); ++ if (ret) ++ continue; ++ dev_priv->ib_alloc_bitmap &= ~(1 << i); ++ index = i; ++ break; ++ } ++ } ++ } ++ ++ if (index == -1) 
{ ++ DRM_ERROR("Major case fail to allocate IB from freelist %llx\n", dev_priv->ib_alloc_bitmap); ++ return -EINVAL; ++ } ++ ++ ++ if (parser->chunks[parser->ib_index].length_dw > RADEON_IB_SIZE / sizeof(uint32_t)) ++ return -EINVAL; ++ ++ ret = drm_bo_do_validate(dev_priv->ib_objs[index]->bo, 0, ++ DRM_BO_FLAG_NO_EVICT, ++ 0, 0); ++ if (ret) { ++ DRM_ERROR("Failed to validate IB %d\n", index); ++ return -EINVAL; ++ } ++ ++ parser->ib = dev_priv->ib_objs[index]->kmap.virtual; ++ parser->card_offset = dev_priv->gart_vm_start + dev_priv->ib_objs[index]->bo->offset; ++ dev_priv->ib_alloc_bitmap |= (1 << i); ++ return 0; ++} ++ ++static void radeon_gem_ib_free(struct drm_radeon_cs_parser *parser, int error) ++{ ++ struct drm_device *dev = parser->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_fence_object *fence; ++ int ret; ++ int i; ++ ++ for (i = 0; i < RADEON_NUM_IB; i++) { ++ if (dev_priv->ib_objs[i]->kmap.virtual == parser->ib) { ++ ++ if (error) { ++ drm_putback_buffer_objects(dev); ++ dev_priv->ib_alloc_bitmap &= ~(1 << i); ++ parser->ib = NULL; ++ parser->card_offset = 0; ++ break; ++ } else { ++ /* emit a fence object */ ++ ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence); ++ dev_priv->irq_emitted = 0; ++ if (ret) { ++ drm_putback_buffer_objects(dev); ++ } ++ /* dereference the fence object */ ++ if (fence) ++ drm_fence_usage_deref_unlocked(&fence); ++ break; ++ } ++ } ++ } ++} ++ ++static int radeon_gem_ib_destroy(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int i; ++ ++ if (dev_priv->ib_objs) { ++ for (i = 0; i < RADEON_NUM_IB; i++) { ++ if (dev_priv->ib_objs[i]) { ++ drm_bo_kunmap(&dev_priv->ib_objs[i]->kmap); ++ drm_bo_usage_deref_unlocked(&dev_priv->ib_objs[i]->bo); ++ } ++ drm_free(dev_priv->ib_objs[i], sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER); ++ } ++ drm_free(dev_priv->ib_objs, RADEON_NUM_IB*sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER); ++ } ++ dev_priv->ib_objs = 
NULL; ++ return 0; ++} ++ ++static int radeon_gem_find_reloc(struct drm_radeon_cs_parser *parser, ++ uint32_t offset, uint32_t *handle, ++ uint32_t *retval) ++{ ++ struct drm_device *dev = parser->dev; ++ struct drm_radeon_kernel_chunk *reloc_chunk = &parser->chunks[parser->reloc_index]; ++ ++ if (!reloc_chunk->kdata) ++ return -EINVAL; ++ ++ if (offset > reloc_chunk->length_dw){ ++ DRM_ERROR("Offset larger than chunk %d %d\n", offset, reloc_chunk->length_dw); ++ return -EINVAL; ++ } ++ ++ *handle = reloc_chunk->kdata[offset]; ++ *retval = reloc_chunk->kdata[offset + 3]; ++ return 0; ++} ++ ++static int radeon_gem_do_relocate(struct drm_gem_object *obj, uint32_t read_domains, ++ uint32_t write_domain, uint32_t *offset) ++{ ++ struct drm_device *dev = obj->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_gem_object *obj_priv; ++ uint32_t flags; ++ int ret; ++ ++ obj_priv = obj->driver_private; ++ ret = radeon_gem_set_domain(obj, read_domains, write_domain, &flags, false); ++ if (ret) { ++ DRM_ERROR("radeon gem set domain %d failed %x %x\n", ret, read_domains, write_domain); ++ return ret; ++ } ++ ++ obj_priv->bo->mem.flags &= ~DRM_BO_FLAG_CLEAN; ++ obj_priv->bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN; ++ ++ if (offset) { ++ if (flags == DRM_BO_FLAG_MEM_VRAM) ++ *offset = obj_priv->bo->offset + dev_priv->fb_location; ++ else if (flags == DRM_BO_FLAG_MEM_TT) ++ *offset = obj_priv->bo->offset + dev_priv->gart_vm_start; ++ } ++ ++ return 0; ++} ++ ++static int radeon_gem_relocate(struct drm_radeon_cs_parser *parser, ++ uint32_t *reloc, uint32_t *offset) ++{ ++ struct drm_device *dev = parser->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ /* relocate the handle */ ++ uint32_t read_domains, write_domain; ++ struct drm_gem_object *obj; ++ int flags = 0; ++ int ret = 0; ++ struct drm_radeon_gem_object *obj_priv; ++ ++ if (parser->reloc_index == -1) { ++ obj = drm_gem_object_lookup(dev, parser->file_priv, reloc[1]); ++ if 
(!obj) ++ return -EINVAL; ++ read_domains = reloc[2]; ++ write_domain = reloc[3]; ++ ++ ret = radeon_gem_do_relocate(obj, read_domains, write_domain, offset); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ } else { ++ uint32_t handle; ++ /* have to lookup handle in other chunk */ ++ ret = radeon_gem_find_reloc(parser, reloc[1], &handle, offset); ++ } ++ return ret; ++} ++ ++ ++int radeon_gem_prelocate(struct drm_radeon_cs_parser *parser) ++{ ++ struct drm_device *dev = parser->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_kernel_chunk *reloc_chunk = &parser->chunks[parser->reloc_index]; ++ struct drm_gem_object *obj; ++ int pass_id; ++ int i; ++ int ret; ++ ++ /* no relocs - return now */ ++ if (!reloc_chunk->kdata) ++ return 0; ++ ++ for (pass_id = 0; pass_id < 2; pass_id++) { ++ /* traverse the reloc chunk */ ++ for (i = 0; i < reloc_chunk->length_dw; i += 4) { ++ ++ if (pass_id == 0) ++ reloc_chunk->kdata[i + 3] = 0; ++ ++ if (pass_id == 1 && reloc_chunk->kdata[i + 3]) ++ continue; ++ ++ /* first pass get all write domains */ ++ if (((pass_id == 0) && reloc_chunk->kdata[i + 2]) || ++ ((pass_id == 1) && reloc_chunk->kdata[i + 1])) { ++ obj = drm_gem_object_lookup(dev, parser->file_priv, reloc_chunk->kdata[i]); ++ if (!obj) { ++ DRM_ERROR("gem object lookup failed %x\n", reloc_chunk->kdata[i]); ++ return -EINVAL; ++ } ++ ret = radeon_gem_do_relocate(obj, reloc_chunk->kdata[i + 1], reloc_chunk->kdata[i + 2], ++ &reloc_chunk->kdata[i + 3]); ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) ++ return ret; ++ } ++ } ++ } ++ return 0; ++} ++ ++/* allocate 1MB of 64k IBs the the kernel can keep mapped */ ++static int radeon_gem_ib_init(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int i; ++ int ret; ++ ++ dev_priv->ib_objs = drm_calloc(RADEON_NUM_IB, 
sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER); ++ if (!dev_priv->ib_objs) ++ goto free_all; ++ ++ for (i = 0; i < RADEON_NUM_IB; i++) { ++ dev_priv->ib_objs[i] = drm_calloc(1, sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER); ++ if (!dev_priv->ib_objs[i]) ++ goto free_all; ++ ++ ret = drm_buffer_object_create(dev, RADEON_IB_SIZE, ++ drm_bo_type_kernel, ++ DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT | ++ DRM_BO_FLAG_MAPPABLE, 0, ++ 0, 0, &dev_priv->ib_objs[i]->bo); ++ if (ret) ++ goto free_all; ++ ++ ret = drm_bo_kmap(dev_priv->ib_objs[i]->bo, 0, RADEON_IB_SIZE >> PAGE_SHIFT, ++ &dev_priv->ib_objs[i]->kmap); ++ ++ if (ret) ++ goto free_all; ++ } ++ ++ dev_priv->mm.gart_useable -= RADEON_IB_SIZE * RADEON_NUM_IB; ++ dev_priv->ib_alloc_bitmap = 0; ++ ++ dev_priv->cs.ib_get = radeon_gem_ib_get; ++ dev_priv->cs.ib_free = radeon_gem_ib_free; ++ ++ radeon_cs_init(dev); ++ dev_priv->cs.relocate = radeon_gem_relocate; ++ return 0; ++ ++free_all: ++ radeon_gem_ib_destroy(dev); ++ return -ENOMEM; ++} ++ ++static struct drm_gem_object *gem_object_get(struct drm_device *dev, uint32_t name) ++{ ++ struct drm_gem_object *obj; ++ ++ spin_lock(&dev->object_name_lock); ++ obj = idr_find(&dev->object_name_idr, name); ++ if (obj) ++ drm_gem_object_reference(obj); ++ spin_unlock(&dev->object_name_lock); ++ return obj; ++} ++ ++void radeon_gem_update_offsets(struct drm_device *dev, struct drm_master *master) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_master_private *master_priv = master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ ++ /* update front_pitch_offset and back_pitch_offset */ ++ obj = gem_object_get(dev, sarea_priv->front_handle); ++ if (obj) { ++ obj_priv = obj->driver_private; ++ ++ dev_priv->front_offset = obj_priv->bo->offset; ++ dev_priv->front_pitch_offset = (((sarea_priv->front_pitch / 64) << 22) | ++ ((obj_priv->bo->offset ++ + 
dev_priv->fb_location) >> 10)); ++ drm_gem_object_unreference(obj); ++ } ++ ++ obj = gem_object_get(dev, sarea_priv->back_handle); ++ if (obj) { ++ obj_priv = obj->driver_private; ++ dev_priv->back_offset = obj_priv->bo->offset; ++ dev_priv->back_pitch_offset = (((sarea_priv->back_pitch / 64) << 22) | ++ ((obj_priv->bo->offset ++ + dev_priv->fb_location) >> 10)); ++ drm_gem_object_unreference(obj); ++ } ++ dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; ++ ++} ++ ++ +diff --git a/drivers/gpu/drm/radeon/radeon_gem_proc.c b/drivers/gpu/drm/radeon/radeon_gem_proc.c +new file mode 100644 +index 0000000..04f5a5f +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_gem_proc.c +@@ -0,0 +1,146 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Eric Anholt ++ * Keith Packard ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++ ++static int radeon_ring_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("RADEON_CP_RB_WPTR %08x\n", ++ RADEON_READ(RADEON_CP_RB_WPTR)); ++ ++ DRM_PROC_PRINT("RADEON_CP_RB_RPTR %08x\n", ++ RADEON_READ(RADEON_CP_RB_RPTR)); ++ ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int radeon_interrupt_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Interrupt enable: %08x\n", ++ RADEON_READ(RADEON_GEN_INT_CNTL)); ++ ++ if (dev_priv->chip_family >= CHIP_RS690) { ++ DRM_PROC_PRINT("DxMODE_INT_MASK: %08x\n", ++ RADEON_READ(R500_DxMODE_INT_MASK)); ++ } ++ DRM_PROC_PRINT("Interrupts received: %d\n", ++ atomic_read(&dev_priv->irq_received)); ++ DRM_PROC_PRINT("Current sequence: %d %d\n", ++ READ_BREADCRUMB(dev_priv), RADEON_READ(RADEON_SCRATCH_REG3)); ++ DRM_PROC_PRINT("Counter sequence: %d\n", ++ dev_priv->counter); ++ if (dev_priv->chip_family >= CHIP_R300) ++ DRM_PROC_PRINT("CS: %d\n", ++ GET_SCRATCH(6)); ++ ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static struct drm_proc_list { ++ /** file name */ ++ const char *name; ++ /** proc callback*/ ++ int 
(*f) (char *, char **, off_t, int, int *, void *); ++} radeon_gem_proc_list[] = { ++ {"radeon_gem_interrupt", radeon_interrupt_info}, ++ {"radeon_gem_ring", radeon_ring_info}, ++}; ++ ++ ++#define RADEON_GEM_PROC_ENTRIES ARRAY_SIZE(radeon_gem_proc_list) ++ ++int radeon_gem_proc_init(struct drm_minor *minor) ++{ ++ struct proc_dir_entry *ent; ++ int i, j; ++ ++ for (i = 0; i < RADEON_GEM_PROC_ENTRIES; i++) { ++ ent = create_proc_entry(radeon_gem_proc_list[i].name, ++ S_IFREG | S_IRUGO, minor->dev_root); ++ if (!ent) { ++ DRM_ERROR("Cannot create /proc/dri/.../%s\n", ++ radeon_gem_proc_list[i].name); ++ for (j = 0; j < i; j++) ++ remove_proc_entry(radeon_gem_proc_list[i].name, ++ minor->dev_root); ++ return -1; ++ } ++ ent->read_proc = radeon_gem_proc_list[i].f; ++ ent->data = minor; ++ } ++ return 0; ++} ++ ++void radeon_gem_proc_cleanup(struct drm_minor *minor) ++{ ++ int i; ++ ++ if (!minor->dev_root) ++ return; ++ ++ for (i = 0; i < RADEON_GEM_PROC_ENTRIES; i++) ++ remove_proc_entry(radeon_gem_proc_list[i].name, minor->dev_root); ++} +diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c +new file mode 100644 +index 0000000..94a485b +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_i2c.c +@@ -0,0 +1,196 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++/** ++ * radeon_ddc_probe ++ * ++ */ ++bool radeon_ddc_probe(struct radeon_connector *radeon_connector) ++{ ++ u8 out_buf[] = { 0x0, 0x0}; ++ u8 buf[2]; ++ int ret; ++ struct i2c_msg msgs[] = { ++ { ++ .addr = 0x50, ++ .flags = 0, ++ .len = 1, ++ .buf = out_buf, ++ }, ++ { ++ .addr = 0x50, ++ .flags = I2C_M_RD, ++ .len = 1, ++ .buf = buf, ++ } ++ }; ++ ++ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); ++ if (ret == 2) ++ return true; ++ ++ return false; ++} ++ ++ ++void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state) ++{ ++ struct drm_radeon_private *dev_priv = radeon_connector->base.dev->dev_private; ++ uint32_t temp; ++ struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec; ++ ++ if (lock_state) { ++ temp = RADEON_READ(rec->a_clk_reg); ++ temp &= ~(rec->a_clk_mask); ++ RADEON_WRITE(rec->a_clk_reg, temp); ++ ++ temp = RADEON_READ(rec->a_data_reg); ++ temp &= ~(rec->a_data_mask); ++ RADEON_WRITE(rec->a_data_reg, temp); ++ } ++ ++ temp = RADEON_READ(rec->mask_clk_reg); ++ if (lock_state) ++ temp |= rec->mask_clk_mask; ++ else ++ temp &= ~rec->mask_clk_mask; ++ RADEON_WRITE(rec->mask_clk_reg, temp); ++ temp = RADEON_READ(rec->mask_clk_reg); ++ ++ temp = RADEON_READ(rec->mask_data_reg); ++ if (lock_state) ++ temp |= rec->mask_data_mask; ++ else ++ temp &= ~rec->mask_data_mask; ++ 
RADEON_WRITE(rec->mask_data_reg, temp); ++ temp = RADEON_READ(rec->mask_data_reg); ++} ++ ++static int get_clock(void *i2c_priv) ++{ ++ struct radeon_i2c_chan *i2c = i2c_priv; ++ struct drm_radeon_private *dev_priv = i2c->dev->dev_private; ++ struct radeon_i2c_bus_rec *rec = &i2c->rec; ++ uint32_t val; ++ ++ val = RADEON_READ(rec->get_clk_reg); ++ val &= rec->get_clk_mask; ++ ++ return (val != 0); ++} ++ ++ ++static int get_data(void *i2c_priv) ++{ ++ struct radeon_i2c_chan *i2c = i2c_priv; ++ struct drm_radeon_private *dev_priv = i2c->dev->dev_private; ++ struct radeon_i2c_bus_rec *rec = &i2c->rec; ++ uint32_t val; ++ ++ val = RADEON_READ(rec->get_data_reg); ++ val &= rec->get_data_mask; ++ return (val != 0); ++} ++ ++static void set_clock(void *i2c_priv, int clock) ++{ ++ struct radeon_i2c_chan *i2c = i2c_priv; ++ struct drm_radeon_private *dev_priv = i2c->dev->dev_private; ++ struct radeon_i2c_bus_rec *rec = &i2c->rec; ++ uint32_t val; ++ ++ val = RADEON_READ(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask); ++ val |= clock ? 0 : rec->put_clk_mask; ++ RADEON_WRITE(rec->put_clk_reg, val); ++} ++ ++static void set_data(void *i2c_priv, int data) ++{ ++ struct radeon_i2c_chan *i2c = i2c_priv; ++ struct drm_radeon_private *dev_priv = i2c->dev->dev_private; ++ struct radeon_i2c_bus_rec *rec = &i2c->rec; ++ uint32_t val; ++ ++ val = RADEON_READ(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask); ++ val |= data ? 
0 : rec->put_data_mask; ++ RADEON_WRITE(rec->put_data_reg, val); ++} ++ ++struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, ++ struct radeon_i2c_bus_rec *rec, ++ const char *name) ++{ ++ struct radeon_i2c_chan *i2c; ++ int ret; ++ ++ i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); ++ if (i2c == NULL) ++ return NULL; ++ ++ i2c->adapter.owner = THIS_MODULE; ++ i2c->adapter.id = I2C_HW_B_RADEON; ++ i2c->adapter.algo_data = &i2c->algo; ++ i2c->dev = dev; ++ i2c->algo.setsda = set_data; ++ i2c->algo.setscl = set_clock; ++ i2c->algo.getsda = get_data; ++ i2c->algo.getscl = get_clock; ++ i2c->algo.udelay = 20; ++ i2c->algo.timeout = usecs_to_jiffies(2200); ++ i2c->algo.data = i2c; ++ i2c->rec = *rec; ++ i2c_set_adapdata(&i2c->adapter, i2c); ++ ++ ret = i2c_bit_add_bus(&i2c->adapter); ++ if (ret) { ++ DRM_INFO("Failed to register i2c %s\n", name); ++ goto out_free; ++ } ++ ++ return i2c; ++out_free: ++ drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); ++ return NULL; ++ ++} ++ ++void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) ++{ ++ if (!i2c) ++ return; ++ ++ i2c_del_adapter(&i2c->adapter); ++ drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); ++} ++ ++struct drm_encoder *radeon_best_encoder(struct drm_connector *connector) ++{ ++ return NULL; ++} ++ +diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c +index 99be114..02932e5 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq.c ++++ b/drivers/gpu/drm/radeon/radeon_irq.c +@@ -195,11 +195,14 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) + if (!stat) + return IRQ_NONE; + ++ atomic_inc(&dev_priv->irq_received); + stat &= dev_priv->irq_enable_reg; + + /* SW interrupt */ +- if (stat & RADEON_SW_INT_TEST) ++ if (stat & RADEON_SW_INT_TEST) { + DRM_WAKEUP(&dev_priv->swi_queue); ++ radeon_fence_handler(dev); ++ } + + /* VBLANK interrupt */ + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { +@@ -216,20 +219,23 @@ 
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) + return IRQ_HANDLED; + } + +-static int radeon_emit_irq(struct drm_device * dev) ++int radeon_emit_irq(struct drm_device * dev) + { + drm_radeon_private_t *dev_priv = dev->dev_private; + unsigned int ret; ++ int i; + RING_LOCALS; + +- atomic_inc(&dev_priv->swi_emitted); +- ret = atomic_read(&dev_priv->swi_emitted); ++ if (!dev_priv->irq_emitted) { ++ ret = radeon_update_breadcrumb(dev); + +- BEGIN_RING(4); +- OUT_RING_REG(RADEON_LAST_SWI_REG, ret); +- OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE); +- ADVANCE_RING(); +- COMMIT_RING(); ++ BEGIN_RING(4); ++ OUT_RING_REG(RADEON_LAST_SWI_REG, ret); ++ OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ } else ++ ret = dev_priv->irq_emitted; + + return ret; + } +@@ -240,13 +246,13 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr) + (drm_radeon_private_t *) dev->dev_private; + int ret = 0; + +- if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr) ++ if (READ_BREADCRUMB(dev_priv) >= swi_nr) + return 0; + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ, +- RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr); ++ READ_BREADCRUMB(dev_priv) >= swi_nr); + + return ret; + } +@@ -340,7 +346,6 @@ int radeon_driver_irq_postinstall(struct drm_device *dev) + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *) dev->dev_private; + +- atomic_set(&dev_priv->swi_emitted, 0); + DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); + + dev->max_vblank_count = 0x001fffff; +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +new file mode 100644 +index 0000000..c0a3c0f +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +@@ -0,0 +1,1075 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++#include "drm_crtc_helper.h" ++ ++void radeon_restore_common_regs(struct drm_device *dev) ++{ ++ /* don't need this yet */ ++} ++ ++static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int i = 0; ++ ++ /* FIXME: Certain revisions of R300 can't recover here. Not sure of ++ the cause yet, but this workaround will mask the problem for now. ++ Other chips usually will pass at the very first test, so the ++ workaround shouldn't have any effect on them. 
*/ ++ for (i = 0; ++ (i < 10000 && ++ RADEON_READ_PLL(dev_priv, RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R); ++ i++); ++} ++ ++static void radeon_pll_write_update(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ ++ while (RADEON_READ_PLL(dev_priv, RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R); ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_REF_DIV, ++ RADEON_PPLL_ATOMIC_UPDATE_W, ++ ~(RADEON_PPLL_ATOMIC_UPDATE_W)); ++} ++ ++static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int i = 0; ++ ++ ++ /* FIXME: Certain revisions of R300 can't recover here. Not sure of ++ the cause yet, but this workaround will mask the problem for now. ++ Other chips usually will pass at the very first test, so the ++ workaround shouldn't have any effect on them. */ ++ for (i = 0; ++ (i < 10000 && ++ RADEON_READ_PLL(dev_priv, RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R); ++ i++); ++} ++ ++static void radeon_pll2_write_update(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ ++ while (RADEON_READ_PLL(dev_priv, RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R); ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_REF_DIV, ++ RADEON_P2PLL_ATOMIC_UPDATE_W, ++ ~(RADEON_P2PLL_ATOMIC_UPDATE_W)); ++} ++ ++static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div, ++ uint16_t fb_div) ++{ ++ unsigned int vcoFreq; ++ ++ if (!ref_div) ++ return 1; ++ ++ vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div; ++ ++ /* ++ * This is horribly crude: the VCO frequency range is divided into ++ * 3 parts, each part having a fixed PLL gain value. 
++ */ ++ if (vcoFreq >= 30000) ++ /* ++ * [300..max] MHz : 7 ++ */ ++ return 7; ++ else if (vcoFreq >= 18000) ++ /* ++ * [180..300) MHz : 4 ++ */ ++ return 4; ++ else ++ /* ++ * [0..180) MHz : 1 ++ */ ++ return 1; ++} ++ ++void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t mask; ++ ++ DRM_DEBUG("\n"); ++ ++ mask = radeon_crtc->crtc_id ? ++ (RADEON_CRTC2_DISP_DIS | RADEON_CRTC2_VSYNC_DIS | RADEON_CRTC2_HSYNC_DIS | RADEON_CRTC2_DISP_REQ_EN_B) : ++ (RADEON_CRTC_DISPLAY_DIS | RADEON_CRTC_VSYNC_DIS | RADEON_CRTC_HSYNC_DIS); ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ if (radeon_crtc->crtc_id) ++ RADEON_WRITE_P(RADEON_CRTC2_GEN_CNTL, 0, ~mask); ++ else { ++ RADEON_WRITE_P(RADEON_CRTC_GEN_CNTL, 0, ~RADEON_CRTC_DISP_REQ_EN_B); ++ RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); ++ } ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ if (radeon_crtc->crtc_id) ++ RADEON_WRITE_P(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_DISP_DIS | RADEON_CRTC2_HSYNC_DIS), ~mask); ++ else { ++ RADEON_WRITE_P(RADEON_CRTC_GEN_CNTL, 0, ~RADEON_CRTC_DISP_REQ_EN_B); ++ RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, (RADEON_CRTC_DISPLAY_DIS | RADEON_CRTC_HSYNC_DIS), ~mask); ++ } ++ break; ++ case DRM_MODE_DPMS_SUSPEND: ++ if (radeon_crtc->crtc_id) ++ RADEON_WRITE_P(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_DISP_DIS | RADEON_CRTC2_VSYNC_DIS), ~mask); ++ else { ++ RADEON_WRITE_P(RADEON_CRTC_GEN_CNTL, 0, ~RADEON_CRTC_DISP_REQ_EN_B); ++ RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, (RADEON_CRTC_DISPLAY_DIS | RADEON_CRTC_VSYNC_DIS), ~mask); ++ } ++ break; ++ case DRM_MODE_DPMS_OFF: ++ if (radeon_crtc->crtc_id) ++ RADEON_WRITE_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); ++ else { ++ RADEON_WRITE_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~RADEON_CRTC_DISP_REQ_EN_B); ++ RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); ++ } ++ break; ++ } ++ ++ if (mode != 
DRM_MODE_DPMS_OFF) { ++ radeon_crtc_load_lut(crtc); ++ } ++} ++ ++/* properly set crtc bpp when using atombios */ ++void radeon_legacy_atom_set_surface(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ int format; ++ uint32_t crtc_gen_cntl, crtc2_gen_cntl; ++ ++ switch (crtc->fb->bits_per_pixel) { ++ case 15: /* 555 */ ++ format = 3; ++ break; ++ case 16: /* 565 */ ++ format = 4; ++ break; ++ case 24: /* RGB */ ++ format = 5; ++ break; ++ case 32: /* xRGB */ ++ format = 6; ++ break; ++ default: ++ return; ++ } ++ ++ switch (radeon_crtc->crtc_id) { ++ case 0: ++ crtc_gen_cntl = RADEON_READ(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff; ++ crtc_gen_cntl |= (format << 8); ++ crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN; ++ RADEON_WRITE(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); ++ break; ++ case 1: ++ crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff; ++ crtc2_gen_cntl |= (format << 8); ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); ++ // not sure we need these... 
++ RADEON_WRITE(RADEON_FP_H2_SYNC_STRT_WID, RADEON_READ(RADEON_CRTC2_H_SYNC_STRT_WID)); ++ RADEON_WRITE(RADEON_FP_V2_SYNC_STRT_WID, RADEON_READ(RADEON_CRTC2_V_SYNC_STRT_WID)); ++ break; ++ } ++} ++ ++static bool radeon_set_crtc1_base(struct drm_crtc *crtc, int x, int y) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_framebuffer *radeon_fb; ++ struct drm_gem_object *obj; ++ struct drm_radeon_gem_object *obj_priv; ++ uint32_t base; ++ uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; ++ uint32_t crtc_pitch; ++ uint32_t disp_merge_cntl; ++ ++ DRM_DEBUG("\n"); ++ ++ radeon_fb = to_radeon_framebuffer(crtc->fb); ++ ++ obj = radeon_fb->obj; ++ obj_priv = obj->driver_private; ++ ++ crtc_offset = obj_priv->bo->offset; ++ ++ crtc_offset_cntl = 0; ++ ++ /* TODO tiling */ ++ if (0) { ++ if (radeon_is_r300(dev_priv)) ++ crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | ++ R300_CRTC_MICRO_TILE_BUFFER_DIS | ++ R300_CRTC_MACRO_TILE_EN); ++ else ++ crtc_offset_cntl |= RADEON_CRTC_TILE_EN; ++ } else { ++ if (radeon_is_r300(dev_priv)) ++ crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN | ++ R300_CRTC_MICRO_TILE_BUFFER_DIS | ++ R300_CRTC_MACRO_TILE_EN); ++ else ++ crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; ++ } ++ ++ base = obj_priv->bo->offset; ++ ++ /* TODO more tiling */ ++ if (0) { ++ if (radeon_is_r300(dev_priv)) { ++ crtc_tile_x0_y0 = x | (y << 16); ++ base &= ~0x7ff; ++ } else { ++ int byteshift = crtc->fb->bits_per_pixel >> 4; ++ int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; ++ base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); ++ crtc_offset_cntl |= (y % 16); ++ } ++ } else { ++ int offset = y * crtc->fb->pitch + x; ++ switch (crtc->fb->bits_per_pixel) { ++ case 15: ++ case 16: ++ offset *= 2; ++ break; ++ case 24: ++ offset *= 3; ++ break; ++ case 32: ++ offset *= 4; ++ break; ++ default: ++ return false; ++ } ++ base += offset; ++ } ++ ++ base &= ~7; ++ ++ /* 
update sarea TODO */ ++ ++ crtc_offset = base; ++ ++ crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) + ++ ((crtc->fb->bits_per_pixel * 8) - 1)) / ++ (crtc->fb->bits_per_pixel * 8)); ++ crtc_pitch |= crtc_pitch << 16; ++ ++ DRM_DEBUG("mc_fb_location: 0x%x\n", dev_priv->fb_location); ++ ++ RADEON_WRITE(RADEON_DISPLAY_BASE_ADDR, dev_priv->fb_location); ++ ++ if (radeon_is_r300(dev_priv)) ++ RADEON_WRITE(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0); ++ RADEON_WRITE(RADEON_CRTC_OFFSET_CNTL, crtc_offset_cntl); ++ RADEON_WRITE(RADEON_CRTC_OFFSET, crtc_offset); ++ RADEON_WRITE(RADEON_CRTC_PITCH, crtc_pitch); ++ ++ disp_merge_cntl = RADEON_READ(RADEON_DISP_MERGE_CNTL); ++ disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; ++ RADEON_WRITE(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); ++ ++ return true; ++} ++ ++static bool radeon_set_crtc1_timing(struct drm_crtc *crtc, struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int format; ++ int hsync_start; ++ int hsync_wid; ++ int vsync_wid; ++ uint32_t crtc_gen_cntl; ++ uint32_t crtc_ext_cntl; ++ uint32_t crtc_h_total_disp; ++ uint32_t crtc_h_sync_strt_wid; ++ uint32_t crtc_v_total_disp; ++ uint32_t crtc_v_sync_strt_wid; ++ ++ DRM_DEBUG("\n"); ++ ++ switch (crtc->fb->bits_per_pixel) { ++ case 15: /* 555 */ ++ format = 3; ++ break; ++ case 16: /* 565 */ ++ format = 4; ++ break; ++ case 24: /* RGB */ ++ format = 5; ++ break; ++ case 32: /* xRGB */ ++ format = 6; ++ break; ++ default: ++ return false; ++ } ++ ++ crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN ++ | RADEON_CRTC_EN ++ | (format << 8) ++ | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ ? RADEON_CRTC_DBL_SCAN_EN ++ : 0) ++ | ((mode->flags & DRM_MODE_FLAG_CSYNC) ++ ? RADEON_CRTC_CSYNC_EN ++ : 0) ++ | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ++ ? 
RADEON_CRTC_INTERLACE_EN ++ : 0)); ++ ++ crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL); ++ crtc_ext_cntl |= (RADEON_XCRT_CNT_EN | ++ RADEON_CRTC_VSYNC_DIS | ++ RADEON_CRTC_HSYNC_DIS | ++ RADEON_CRTC_DISPLAY_DIS); ++ ++ crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) ++ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); ++ ++ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; ++ if (!hsync_wid) ++ hsync_wid = 1; ++ hsync_start = mode->crtc_hsync_start - 8; ++ ++ crtc_h_sync_strt_wid = ((hsync_start & 0x1fff) ++ | ((hsync_wid & 0x3f) << 16) ++ | ((mode->flags & DRM_MODE_FLAG_NHSYNC) ++ ? RADEON_CRTC_H_SYNC_POL ++ : 0)); ++ ++ /* This works for double scan mode. */ ++ crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) ++ | ((mode->crtc_vdisplay - 1) << 16)); ++ ++ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; ++ if (!vsync_wid) ++ vsync_wid = 1; ++ ++ crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) ++ | ((vsync_wid & 0x1f) << 16) ++ | ((mode->flags & DRM_MODE_FLAG_NVSYNC) ++ ? RADEON_CRTC_V_SYNC_POL ++ : 0)); ++ ++ /* TODO -> Dell Server */ ++ if (0) { ++ uint32_t disp_hw_debug = RADEON_READ(RADEON_DISP_HW_DEBUG); ++ uint32_t tv_dac_cntl = RADEON_READ(RADEON_TV_DAC_CNTL); ++ uint32_t dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2); ++ uint32_t crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL); ++ ++ dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; ++ dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; ++ ++ /* For CRT on DAC2, don't turn it on if BIOS didn't ++ enable it, even it's detected. 
++ */ ++ disp_hw_debug |= RADEON_CRT2_DISP1_SEL; ++ tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16)); ++ tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16)); ++ ++ RADEON_WRITE(RADEON_TV_DAC_CNTL, tv_dac_cntl); ++ RADEON_WRITE(RADEON_DISP_HW_DEBUG, disp_hw_debug); ++ RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); ++ } ++ ++ RADEON_WRITE(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl | ++ RADEON_CRTC_DISP_REQ_EN_B); ++ ++ RADEON_WRITE_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ++ RADEON_CRTC_VSYNC_DIS | RADEON_CRTC_HSYNC_DIS | RADEON_CRTC_DISPLAY_DIS); ++ ++ RADEON_WRITE(RADEON_CRTC_H_TOTAL_DISP, crtc_h_total_disp); ++ RADEON_WRITE(RADEON_CRTC_H_SYNC_STRT_WID, crtc_h_sync_strt_wid); ++ RADEON_WRITE(RADEON_CRTC_V_TOTAL_DISP, crtc_v_total_disp); ++ RADEON_WRITE(RADEON_CRTC_V_SYNC_STRT_WID, crtc_v_sync_strt_wid); ++ ++ RADEON_WRITE(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); ++ ++ return true; ++} ++ ++static void radeon_set_pll1(struct drm_crtc *crtc, struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct drm_encoder *encoder; ++ uint32_t feedback_div = 0; ++ uint32_t reference_div = 0; ++ uint32_t post_divider = 0; ++ uint32_t freq = 0; ++ uint8_t pll_gain; ++ int pll_flags = RADEON_PLL_LEGACY; ++ bool use_bios_divs = false; ++ /* PLL registers */ ++ uint32_t ppll_ref_div = 0; ++ uint32_t ppll_div_3 = 0; ++ uint32_t htotal_cntl = 0; ++ uint32_t vclk_ecp_cntl; ++ ++ struct radeon_pll *pll = &dev_priv->mode_info.p1pll; ++ ++ struct { ++ int divider; ++ int bitvalue; ++ } *post_div, post_divs[] = { ++ /* From RAGE 128 VR/RAGE 128 GL Register ++ * Reference Manual (Technical Reference ++ * Manual P/N RRG-G04100-C Rev. 0.04), page ++ * 3-17 (PLL_DIV_[3:0]). 
++ */ ++ { 1, 0 }, /* VCLK_SRC */ ++ { 2, 1 }, /* VCLK_SRC/2 */ ++ { 4, 2 }, /* VCLK_SRC/4 */ ++ { 8, 3 }, /* VCLK_SRC/8 */ ++ { 3, 4 }, /* VCLK_SRC/3 */ ++ { 16, 5 }, /* VCLK_SRC/16 */ ++ { 6, 6 }, /* VCLK_SRC/6 */ ++ { 12, 7 }, /* VCLK_SRC/12 */ ++ { 0, 0 } ++ }; ++ ++ if (mode->clock > 200000) /* range limits??? */ ++ pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; ++ else ++ pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ if (encoder->crtc == crtc) { ++ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) ++ pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; ++ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ ++ if (radeon_encoder->use_bios_dividers) { ++ ppll_ref_div = radeon_encoder->panel_ref_divider; ++ ppll_div_3 = (radeon_encoder->panel_fb_divider | ++ (radeon_encoder->panel_post_divider << 16)); ++ htotal_cntl = 0; ++ use_bios_divs = true; ++ } else ++ pll_flags |= RADEON_PLL_USE_REF_DIV; ++ } ++ } ++ } ++ ++ DRM_DEBUG("\n"); ++ ++ if (!use_bios_divs) { ++ radeon_compute_pll(pll, mode->clock, &freq, &feedback_div, &reference_div, &post_divider, pll_flags); ++ ++ for (post_div = &post_divs[0]; post_div->divider; ++post_div) { ++ if (post_div->divider == post_divider) ++ break; ++ } ++ ++ if (!post_div->divider) { ++ post_div = &post_divs[0]; ++ } ++ ++ DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n", ++ (unsigned)freq, ++ feedback_div, ++ reference_div, ++ post_divider); ++ ++ ppll_ref_div = reference_div; ++#if defined(__powerpc__) && (0) /* TODO */ ++ /* apparently programming this otherwise causes a hang??? 
*/ ++ if (info->MacModel == RADEON_MAC_IBOOK) ++ state->ppll_div_3 = 0x000600ad; ++ else ++#endif ++ ppll_div_3 = (feedback_div | (post_div->bitvalue << 16)); ++ htotal_cntl = mode->htotal & 0x7; ++ ++ } ++ ++ vclk_ecp_cntl = (RADEON_READ_PLL(dev_priv, RADEON_VCLK_ECP_CNTL) & ++ ~RADEON_VCLK_SRC_SEL_MASK) | RADEON_VCLK_SRC_SEL_PPLLCLK; ++ ++ pll_gain = radeon_compute_pll_gain(dev_priv->mode_info.p1pll.reference_freq, ++ ppll_ref_div & RADEON_PPLL_REF_DIV_MASK, ++ ppll_div_3 & RADEON_PPLL_FB3_DIV_MASK); ++ ++ if (dev_priv->flags & RADEON_IS_MOBILITY) { ++ /* A temporal workaround for the occational blanking on certain laptop panels. ++ This appears to related to the PLL divider registers (fail to lock?). ++ It occurs even when all dividers are the same with their old settings. ++ In this case we really don't need to fiddle with PLL registers. ++ By doing this we can avoid the blanking problem with some panels. ++ */ ++ if ((ppll_ref_div == (RADEON_READ_PLL(dev_priv, RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) && ++ (ppll_div_3 == (RADEON_READ_PLL(dev_priv, RADEON_PPLL_DIV_3) & ++ (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) { ++ RADEON_WRITE_P(RADEON_CLOCK_CNTL_INDEX, ++ RADEON_PLL_DIV_SEL, ++ ~(RADEON_PLL_DIV_SEL)); ++ radeon_pll_errata_after_index(dev_priv); ++ return; ++ } ++ } ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_VCLK_ECP_CNTL, ++ RADEON_VCLK_SRC_SEL_CPUCLK, ++ ~(RADEON_VCLK_SRC_SEL_MASK)); ++ RADEON_WRITE_PLL_P(dev_priv, ++ RADEON_PPLL_CNTL, ++ RADEON_PPLL_RESET ++ | RADEON_PPLL_ATOMIC_UPDATE_EN ++ | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN ++ | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT), ++ ~(RADEON_PPLL_RESET ++ | RADEON_PPLL_ATOMIC_UPDATE_EN ++ | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN ++ | RADEON_PPLL_PVG_MASK)); ++ ++ RADEON_WRITE_P(RADEON_CLOCK_CNTL_INDEX, ++ RADEON_PLL_DIV_SEL, ++ ~(RADEON_PLL_DIV_SEL)); ++ radeon_pll_errata_after_index(dev_priv); ++ ++ if (radeon_is_r300(dev_priv) || ++ (dev_priv->chip_family == CHIP_RS300) || ++ 
(dev_priv->chip_family == CHIP_RS400) || ++ (dev_priv->chip_family == CHIP_RS480)) { ++ if (ppll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) { ++ /* When restoring console mode, use saved PPLL_REF_DIV ++ * setting. ++ */ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_REF_DIV, ++ ppll_ref_div, ++ 0); ++ } else { ++ /* R300 uses ref_div_acc field as real ref divider */ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_REF_DIV, ++ (ppll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT), ++ ~R300_PPLL_REF_DIV_ACC_MASK); ++ } ++ } else { ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_REF_DIV, ++ ppll_ref_div, ++ ~RADEON_PPLL_REF_DIV_MASK); ++ } ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_DIV_3, ++ ppll_div_3, ++ ~RADEON_PPLL_FB3_DIV_MASK); ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_DIV_3, ++ ppll_div_3, ++ ~RADEON_PPLL_POST3_DIV_MASK); ++ ++ radeon_pll_write_update(dev); ++ radeon_pll_wait_for_read_update_complete(dev); ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_HTOTAL_CNTL, htotal_cntl); ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PPLL_CNTL, ++ 0, ++ ~(RADEON_PPLL_RESET ++ | RADEON_PPLL_SLEEP ++ | RADEON_PPLL_ATOMIC_UPDATE_EN ++ | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN)); ++ ++ DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n", ++ ppll_ref_div, ++ ppll_div_3, ++ (unsigned)htotal_cntl, ++ RADEON_READ_PLL(dev_priv, RADEON_PPLL_CNTL)); ++ DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n", ++ ppll_ref_div & RADEON_PPLL_REF_DIV_MASK, ++ ppll_div_3 & RADEON_PPLL_FB3_DIV_MASK, ++ (ppll_div_3 & RADEON_PPLL_POST3_DIV_MASK) >> 16); ++ ++ mdelay(50); /* Let the clock to lock */ ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_VCLK_ECP_CNTL, ++ RADEON_VCLK_SRC_SEL_PPLLCLK, ++ ~(RADEON_VCLK_SRC_SEL_MASK)); ++ ++ /*RADEON_WRITE_PLL(dev_priv, RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);*/ ++ ++} ++ ++static bool radeon_set_crtc2_base(struct drm_crtc *crtc, int x, int y) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_framebuffer *radeon_fb; ++ struct drm_gem_object 
*obj; ++ struct drm_radeon_gem_object *obj_priv; ++ uint32_t base; ++ uint32_t crtc2_offset, crtc2_offset_cntl, crtc2_tile_x0_y0 = 0; ++ uint32_t crtc2_pitch; ++ uint32_t disp2_merge_cntl; ++ ++ DRM_DEBUG("\n"); ++ ++ radeon_fb = to_radeon_framebuffer(crtc->fb); ++ ++ obj = radeon_fb->obj; ++ obj_priv = obj->driver_private; ++ ++ crtc2_offset = obj_priv->bo->offset; ++ ++ crtc2_offset_cntl = 0; ++ ++ /* TODO tiling */ ++ if (0) { ++ if (radeon_is_r300(dev_priv)) ++ crtc2_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | ++ R300_CRTC_MICRO_TILE_BUFFER_DIS | ++ R300_CRTC_MACRO_TILE_EN); ++ else ++ crtc2_offset_cntl |= RADEON_CRTC_TILE_EN; ++ } else { ++ if (radeon_is_r300(dev_priv)) ++ crtc2_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN | ++ R300_CRTC_MICRO_TILE_BUFFER_DIS | ++ R300_CRTC_MACRO_TILE_EN); ++ else ++ crtc2_offset_cntl &= ~RADEON_CRTC_TILE_EN; ++ } ++ ++ base = obj_priv->bo->offset; ++ ++ /* TODO more tiling */ ++ if (0) { ++ if (radeon_is_r300(dev_priv)) { ++ crtc2_tile_x0_y0 = x | (y << 16); ++ base &= ~0x7ff; ++ } else { ++ int byteshift = crtc->fb->bits_per_pixel >> 4; ++ int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; ++ base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); ++ crtc2_offset_cntl |= (y % 16); ++ } ++ } else { ++ int offset = y * crtc->fb->pitch + x; ++ switch (crtc->fb->bits_per_pixel) { ++ case 15: ++ case 16: ++ offset *= 2; ++ break; ++ case 24: ++ offset *= 3; ++ break; ++ case 32: ++ offset *= 4; ++ break; ++ default: ++ return false; ++ } ++ base += offset; ++ } ++ ++ base &= ~7; ++ ++ /* update sarea TODO */ ++ ++ crtc2_offset = base; ++ ++ crtc2_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) + ++ ((crtc->fb->bits_per_pixel * 8) - 1)) / ++ (crtc->fb->bits_per_pixel * 8)); ++ crtc2_pitch |= crtc2_pitch << 16; ++ ++ RADEON_WRITE(RADEON_DISPLAY2_BASE_ADDR, dev_priv->fb_location); ++ ++ if (radeon_is_r300(dev_priv)) ++ RADEON_WRITE(R300_CRTC2_TILE_X0_Y0, 
crtc2_tile_x0_y0); ++ RADEON_WRITE(RADEON_CRTC2_OFFSET_CNTL, crtc2_offset_cntl); ++ RADEON_WRITE(RADEON_CRTC2_OFFSET, crtc2_offset); ++ RADEON_WRITE(RADEON_CRTC2_PITCH, crtc2_pitch); ++ ++ disp2_merge_cntl = RADEON_READ(RADEON_DISP2_MERGE_CNTL); ++ disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; ++ RADEON_WRITE(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl); ++ ++ return true; ++} ++ ++static bool radeon_set_crtc2_timing(struct drm_crtc *crtc, struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ int format; ++ int hsync_start; ++ int hsync_wid; ++ int vsync_wid; ++ uint32_t crtc2_gen_cntl; ++ uint32_t crtc2_h_total_disp; ++ uint32_t crtc2_h_sync_strt_wid; ++ uint32_t crtc2_v_total_disp; ++ uint32_t crtc2_v_sync_strt_wid; ++ uint32_t fp_h2_sync_strt_wid; ++ uint32_t fp_v2_sync_strt_wid; ++ ++ DRM_DEBUG("\n"); ++ ++ switch (crtc->fb->bits_per_pixel) { ++ ++ case 15: /* 555 */ ++ format = 3; ++ break; ++ case 16: /* 565 */ ++ format = 4; ++ break; ++ case 24: /* RGB */ ++ format = 5; ++ break; ++ case 32: /* xRGB */ ++ format = 6; ++ break; ++ default: ++ return false; ++ } ++ ++ crtc2_h_total_disp = ++ ((((mode->crtc_htotal / 8) - 1) & 0x3ff) ++ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); ++ ++ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; ++ if (!hsync_wid) ++ hsync_wid = 1; ++ hsync_start = mode->crtc_hsync_start - 8; ++ ++ crtc2_h_sync_strt_wid = ((hsync_start & 0x1fff) ++ | ((hsync_wid & 0x3f) << 16) ++ | ((mode->flags & DRM_MODE_FLAG_NHSYNC) ++ ? RADEON_CRTC_H_SYNC_POL ++ : 0)); ++ ++ /* This works for double scan mode. 
*/ ++ crtc2_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) ++ | ((mode->crtc_vdisplay - 1) << 16)); ++ ++ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; ++ if (!vsync_wid) ++ vsync_wid = 1; ++ ++ crtc2_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) ++ | ((vsync_wid & 0x1f) << 16) ++ | ((mode->flags & DRM_MODE_FLAG_NVSYNC) ++ ? RADEON_CRTC2_V_SYNC_POL ++ : 0)); ++ ++ /* check to see if TV DAC is enabled for another crtc and keep it enabled */ ++ if (RADEON_READ(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON) ++ crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON; ++ else ++ crtc2_gen_cntl = 0; ++ ++ crtc2_gen_cntl |= (RADEON_CRTC2_EN ++ | (format << 8) ++ | RADEON_CRTC2_VSYNC_DIS ++ | RADEON_CRTC2_HSYNC_DIS ++ | RADEON_CRTC2_DISP_DIS ++ | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ ? RADEON_CRTC2_DBL_SCAN_EN ++ : 0) ++ | ((mode->flags & DRM_MODE_FLAG_CSYNC) ++ ? RADEON_CRTC2_CSYNC_EN ++ : 0) ++ | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ++ ? RADEON_CRTC2_INTERLACE_EN ++ : 0)); ++ ++ fp_h2_sync_strt_wid = crtc2_h_sync_strt_wid; ++ fp_v2_sync_strt_wid = crtc2_v_sync_strt_wid; ++ ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, ++ crtc2_gen_cntl | RADEON_CRTC2_VSYNC_DIS | ++ RADEON_CRTC2_HSYNC_DIS | RADEON_CRTC2_DISP_DIS | ++ RADEON_CRTC2_DISP_REQ_EN_B); ++ ++ RADEON_WRITE(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp); ++ RADEON_WRITE(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid); ++ RADEON_WRITE(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp); ++ RADEON_WRITE(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid); ++ ++ RADEON_WRITE(RADEON_FP_H2_SYNC_STRT_WID, fp_h2_sync_strt_wid); ++ RADEON_WRITE(RADEON_FP_V2_SYNC_STRT_WID, fp_v2_sync_strt_wid); ++ ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); ++ ++ return true; ++ ++} ++ ++static void radeon_set_pll2(struct drm_crtc *crtc, struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct drm_encoder *encoder; ++ uint32_t 
feedback_div = 0; ++ uint32_t reference_div = 0; ++ uint32_t post_divider = 0; ++ uint32_t freq = 0; ++ uint8_t pll_gain; ++ int pll_flags = RADEON_PLL_LEGACY; ++ bool use_bios_divs = false; ++ /* PLL2 registers */ ++ uint32_t p2pll_ref_div = 0; ++ uint32_t p2pll_div_0 = 0; ++ uint32_t htotal_cntl2 = 0; ++ uint32_t pixclks_cntl; ++ ++ struct radeon_pll *pll = &dev_priv->mode_info.p2pll; ++ ++ struct { ++ int divider; ++ int bitvalue; ++ } *post_div, post_divs[] = { ++ /* From RAGE 128 VR/RAGE 128 GL Register ++ * Reference Manual (Technical Reference ++ * Manual P/N RRG-G04100-C Rev. 0.04), page ++ * 3-17 (PLL_DIV_[3:0]). ++ */ ++ { 1, 0 }, /* VCLK_SRC */ ++ { 2, 1 }, /* VCLK_SRC/2 */ ++ { 4, 2 }, /* VCLK_SRC/4 */ ++ { 8, 3 }, /* VCLK_SRC/8 */ ++ { 3, 4 }, /* VCLK_SRC/3 */ ++ { 6, 6 }, /* VCLK_SRC/6 */ ++ { 12, 7 }, /* VCLK_SRC/12 */ ++ { 0, 0 } ++ }; ++ ++ if (mode->clock > 200000) /* range limits??? */ ++ pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; ++ else ++ pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ if (encoder->crtc == crtc) { ++ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) ++ pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; ++ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ ++ if (radeon_encoder->use_bios_dividers) { ++ p2pll_ref_div = radeon_encoder->panel_ref_divider; ++ p2pll_div_0 = (radeon_encoder->panel_fb_divider | ++ (radeon_encoder->panel_post_divider << 16)); ++ htotal_cntl2 = 0; ++ use_bios_divs = true; ++ } else ++ pll_flags |= RADEON_PLL_USE_REF_DIV; ++ } ++ } ++ } ++ ++ DRM_DEBUG("\n"); ++ ++ if (!use_bios_divs) { ++ radeon_compute_pll(pll, mode->clock, &freq, &feedback_div, &reference_div, &post_divider, pll_flags); ++ ++ for (post_div = &post_divs[0]; post_div->divider; ++post_div) { ++ if (post_div->divider == post_divider) ++ break; ++ } ++ ++ if (!post_div->divider) { ++ post_div = 
&post_divs[0]; ++ } ++ ++ DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n", ++ (unsigned)freq, ++ feedback_div, ++ reference_div, ++ post_divider); ++ ++ p2pll_ref_div = reference_div; ++ p2pll_div_0 = (feedback_div | (post_div->bitvalue << 16)); ++ htotal_cntl2 = mode->htotal & 0x7; ++ ++ } ++ ++ pixclks_cntl = ((RADEON_READ_PLL(dev_priv, RADEON_PIXCLKS_CNTL) & ++ ~(RADEON_PIX2CLK_SRC_SEL_MASK)) | ++ RADEON_PIX2CLK_SRC_SEL_P2PLLCLK); ++ ++ pll_gain = radeon_compute_pll_gain(dev_priv->mode_info.p2pll.reference_freq, ++ p2pll_ref_div & RADEON_P2PLL_REF_DIV_MASK, ++ p2pll_div_0 & RADEON_P2PLL_FB0_DIV_MASK); ++ ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PIXCLKS_CNTL, ++ RADEON_PIX2CLK_SRC_SEL_CPUCLK, ++ ~(RADEON_PIX2CLK_SRC_SEL_MASK)); ++ ++ RADEON_WRITE_PLL_P(dev_priv, ++ RADEON_P2PLL_CNTL, ++ RADEON_P2PLL_RESET ++ | RADEON_P2PLL_ATOMIC_UPDATE_EN ++ | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT), ++ ~(RADEON_P2PLL_RESET ++ | RADEON_P2PLL_ATOMIC_UPDATE_EN ++ | RADEON_P2PLL_PVG_MASK)); ++ ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_REF_DIV, ++ p2pll_ref_div, ++ ~RADEON_P2PLL_REF_DIV_MASK); ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_DIV_0, ++ p2pll_div_0, ++ ~RADEON_P2PLL_FB0_DIV_MASK); ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_DIV_0, ++ p2pll_div_0, ++ ~RADEON_P2PLL_POST0_DIV_MASK); ++ ++ radeon_pll2_write_update(dev); ++ radeon_pll2_wait_for_read_update_complete(dev); ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_HTOTAL2_CNTL, htotal_cntl2); ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_P2PLL_CNTL, ++ 0, ++ ~(RADEON_P2PLL_RESET ++ | RADEON_P2PLL_SLEEP ++ | RADEON_P2PLL_ATOMIC_UPDATE_EN)); ++ ++ DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n", ++ (unsigned)p2pll_ref_div, ++ (unsigned)p2pll_div_0, ++ (unsigned)htotal_cntl2, ++ RADEON_READ_PLL(dev_priv, RADEON_P2PLL_CNTL)); ++ DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n", ++ (unsigned)p2pll_ref_div & RADEON_P2PLL_REF_DIV_MASK, ++ (unsigned)p2pll_div_0 & RADEON_P2PLL_FB0_DIV_MASK, ++ (unsigned)((p2pll_div_0 & ++ 
RADEON_P2PLL_POST0_DIV_MASK) >>16)); ++ ++ mdelay(50); /* Let the clock to lock */ ++ ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PIXCLKS_CNTL, ++ RADEON_PIX2CLK_SRC_SEL_P2PLLCLK, ++ ~(RADEON_PIX2CLK_SRC_SEL_MASK)); ++ ++ RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, pixclks_cntl); ++ ++} ++ ++static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ return true; ++} ++ ++void radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ ++ switch(radeon_crtc->crtc_id) { ++ case 0: ++ radeon_set_crtc1_base(crtc, x, y); ++ break; ++ case 1: ++ radeon_set_crtc2_base(crtc, x, y); ++ break; ++ ++ } ++} ++ ++static void radeon_crtc_mode_set(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode, ++ int x, int y) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ ++ DRM_DEBUG("\n"); ++ ++ /* TODO TV */ ++ ++ radeon_crtc_set_base(crtc, x, y); ++ ++ switch(radeon_crtc->crtc_id) { ++ case 0: ++ radeon_set_crtc1_timing(crtc, adjusted_mode); ++ radeon_set_pll1(crtc, adjusted_mode); ++ break; ++ case 1: ++ radeon_set_crtc2_timing(crtc, adjusted_mode); ++ radeon_set_pll2(crtc, adjusted_mode); ++ break; ++ ++ } ++} ++ ++static void radeon_crtc_prepare(struct drm_crtc *crtc) ++{ ++ radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); ++} ++ ++static void radeon_crtc_commit(struct drm_crtc *crtc) ++{ ++ radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON); ++} ++ ++static const struct drm_crtc_helper_funcs legacy_helper_funcs = { ++ .dpms = radeon_crtc_dpms, ++ .mode_fixup = radeon_crtc_mode_fixup, ++ .mode_set = radeon_crtc_mode_set, ++ .mode_set_base = radeon_crtc_set_base, ++ .prepare = radeon_crtc_prepare, ++ .commit = radeon_crtc_commit, ++}; ++ ++ ++void radeon_legacy_init_crtc(struct drm_device *dev, ++ struct radeon_crtc *radeon_crtc) ++{ ++ drm_crtc_helper_add(&radeon_crtc->base, 
&legacy_helper_funcs); ++} +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +new file mode 100644 +index 0000000..3df89d3 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +@@ -0,0 +1,1371 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "drm_crtc_helper.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++ ++static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ int xres = mode->hdisplay; ++ int yres = mode->vdisplay; ++ bool hscale = true, vscale = true; ++ int hsync_wid; ++ int vsync_wid; ++ int hsync_start; ++ uint32_t scale, inc; ++ uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active; ++ uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp; ++ ++ DRM_DEBUG("\n"); ++ ++ fp_vert_stretch = RADEON_READ(RADEON_FP_VERT_STRETCH) & ++ (RADEON_VERT_STRETCH_RESERVED | ++ RADEON_VERT_AUTO_RATIO_INC); ++ fp_horz_stretch = RADEON_READ(RADEON_FP_HORZ_STRETCH) & ++ (RADEON_HORZ_FP_LOOP_STRETCH | ++ RADEON_HORZ_AUTO_RATIO_INC); ++ ++ crtc_more_cntl = 0; ++ if ((dev_priv->chip_family == CHIP_RS100) || ++ (dev_priv->chip_family == CHIP_RS200)) { ++ /* This is to workaround the asic bug for RMX, some versions ++ of BIOS dosen't have this register initialized correctly. */ ++ crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; ++ } ++ ++ ++ fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) ++ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); ++ ++ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; ++ if (!hsync_wid) ++ hsync_wid = 1; ++ hsync_start = mode->crtc_hsync_start - 8; ++ ++ fp_h_sync_strt_wid = ((hsync_start & 0x1fff) ++ | ((hsync_wid & 0x3f) << 16) ++ | ((mode->flags & DRM_MODE_FLAG_NHSYNC) ++ ? 
RADEON_CRTC_H_SYNC_POL ++ : 0)); ++ ++ fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) ++ | ((mode->crtc_vdisplay - 1) << 16)); ++ ++ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; ++ if (!vsync_wid) ++ vsync_wid = 1; ++ ++ fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) ++ | ((vsync_wid & 0x1f) << 16) ++ | ((mode->flags & DRM_MODE_FLAG_NVSYNC) ++ ? RADEON_CRTC_V_SYNC_POL ++ : 0)); ++ ++ fp_horz_vert_active = 0; ++ ++ if (radeon_encoder->panel_xres == 0 || ++ radeon_encoder->panel_yres == 0) { ++ hscale = false; ++ vscale = false; ++ } else { ++ if (xres > radeon_encoder->panel_xres) ++ xres = radeon_encoder->panel_xres; ++ if (yres > radeon_encoder->panel_yres) ++ yres = radeon_encoder->panel_yres; ++ ++ if (xres == radeon_encoder->panel_xres) ++ hscale = false; ++ if (yres == radeon_encoder->panel_yres) ++ vscale = false; ++ } ++ ++ if (radeon_encoder->flags & RADEON_USE_RMX) { ++ if (radeon_encoder->rmx_type != RMX_CENTER) { ++ if (!hscale) ++ fp_horz_stretch |= ((xres/8-1) << 16); ++ else { ++ inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; ++ scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) ++ / radeon_encoder->panel_xres + 1; ++ fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | ++ RADEON_HORZ_STRETCH_BLEND | ++ RADEON_HORZ_STRETCH_ENABLE | ++ ((radeon_encoder->panel_xres/8-1) << 16)); ++ } ++ ++ if (!vscale) ++ fp_vert_stretch |= ((yres-1) << 12); ++ else { ++ inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 
1 : 0; ++ scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) ++ / radeon_encoder->panel_yres + 1; ++ fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | ++ RADEON_VERT_STRETCH_ENABLE | ++ RADEON_VERT_STRETCH_BLEND | ++ ((radeon_encoder->panel_yres-1) << 12)); ++ } ++ } else if (radeon_encoder->rmx_type == RMX_CENTER) { ++ int blank_width; ++ ++ fp_horz_stretch |= ((xres/8-1) << 16); ++ fp_vert_stretch |= ((yres-1) << 12); ++ ++ crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | ++ RADEON_CRTC_AUTO_VERT_CENTER_EN); ++ ++ blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; ++ if (blank_width > 110) ++ blank_width = 110; ++ ++ fp_crtc_h_total_disp = (((blank_width) & 0x3ff) ++ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); ++ ++ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; ++ if (!hsync_wid) ++ hsync_wid = 1; ++ ++ fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) ++ | ((hsync_wid & 0x3f) << 16) ++ | ((mode->flags & DRM_MODE_FLAG_NHSYNC) ++ ? RADEON_CRTC_H_SYNC_POL ++ : 0)); ++ ++ fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) ++ | ((mode->crtc_vdisplay - 1) << 16)); ++ ++ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; ++ if (!vsync_wid) ++ vsync_wid = 1; ++ ++ fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) ++ | ((vsync_wid & 0x1f) << 16) ++ | ((mode->flags & DRM_MODE_FLAG_NVSYNC) ++ ? 
RADEON_CRTC_V_SYNC_POL ++ : 0))); ++ ++ fp_horz_vert_active = (((radeon_encoder->panel_yres) & 0xfff) | ++ (((radeon_encoder->panel_xres / 8) & 0x1ff) << 16)); ++ } ++ } else { ++ fp_horz_stretch |= ((xres/8-1) << 16); ++ fp_vert_stretch |= ((yres-1) << 12); ++ } ++ ++ RADEON_WRITE(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); ++ RADEON_WRITE(RADEON_FP_VERT_STRETCH, fp_vert_stretch); ++ RADEON_WRITE(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); ++ RADEON_WRITE(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); ++ RADEON_WRITE(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); ++ RADEON_WRITE(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); ++ RADEON_WRITE(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); ++ RADEON_WRITE(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); ++ ++} ++ ++static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ struct radeon_crtc *radeon_crtc; ++ uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; ++ uint32_t bios_5_scratch, bios_6_scratch; ++ int crtc_id = 0; ++ DRM_DEBUG("\n"); ++ ++ if (encoder->crtc) { ++ radeon_crtc = to_radeon_crtc(encoder->crtc); ++ crtc_id = radeon_crtc->crtc_id; ++ } ++ ++ // FIXME atom/legacy cards like r4xx ++ bios_5_scratch = RADEON_READ(RADEON_BIOS_5_SCRATCH); ++ bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ ++ bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK; ++ bios_5_scratch |= (crtc_id << RADEON_LCD1_CRTC_SHIFT); ++ ++ switch (mode) { ++ case DRM_MODE_DPMS_ON: ++ disp_pwr_man = RADEON_READ(RADEON_DISP_PWR_MAN); ++ disp_pwr_man |= RADEON_AUTO_PWRUP_EN; ++ RADEON_WRITE(RADEON_DISP_PWR_MAN, disp_pwr_man); ++ lvds_pll_cntl = RADEON_READ(RADEON_LVDS_PLL_CNTL); ++ lvds_pll_cntl |= RADEON_LVDS_PLL_EN; ++ RADEON_WRITE(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); ++ udelay(1000); ++ ++ lvds_pll_cntl = 
RADEON_READ(RADEON_LVDS_PLL_CNTL); ++ lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; ++ RADEON_WRITE(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); ++ ++ lvds_gen_cntl = RADEON_READ(RADEON_LVDS_GEN_CNTL); ++ lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON); ++ lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS); ++ udelay(radeon_encoder->panel_pwr_delay * 1000); ++ RADEON_WRITE(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); ++ ++ /* update bios scratch regs */ ++ bios_5_scratch |= RADEON_LCD1_ON; ++ bios_6_scratch |= RADEON_LCD_DPMS_ON; ++ ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ pixclks_cntl = RADEON_READ_PLL(dev_priv, RADEON_PIXCLKS_CNTL); ++ RADEON_WRITE_PLL_P(dev_priv, RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); ++ lvds_gen_cntl = RADEON_READ(RADEON_LVDS_GEN_CNTL); ++ lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; ++ lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); ++ udelay(radeon_encoder->panel_pwr_delay * 1000); ++ RADEON_WRITE(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); ++ RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, pixclks_cntl); ++ ++ bios_5_scratch &= ~RADEON_LCD1_ON; ++ bios_6_scratch &= ~RADEON_LCD_DPMS_ON; ++ break; ++ } ++ RADEON_WRITE(RADEON_BIOS_5_SCRATCH, bios_5_scratch); ++ RADEON_WRITE(RADEON_BIOS_6_SCRATCH, bios_6_scratch); ++} ++ ++static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) ++{ ++ struct drm_radeon_private *dev_priv = encoder->dev->dev_private; ++ // fix me: atom/legacy r4xx ++ if (!dev_priv->is_atom_bios) ++ radeon_combios_output_lock(encoder, true); ++ radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) ++{ ++ struct drm_radeon_private *dev_priv = encoder->dev->dev_private; ++ radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON); ++ // fix me: atom/legacy r4xx ++ if (!dev_priv->is_atom_bios) ++ 
radeon_combios_output_lock(encoder, false); ++} ++ ++static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl; ++ ++ DRM_DEBUG("\n"); ++ ++ if (radeon_crtc->crtc_id == 0) ++ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); ++ ++ lvds_pll_cntl = RADEON_READ(RADEON_LVDS_PLL_CNTL); ++ lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; ++ if (radeon_encoder->lvds_gen_cntl) ++ lvds_gen_cntl = radeon_encoder->lvds_gen_cntl; ++ else ++ lvds_gen_cntl = RADEON_READ(RADEON_LVDS_GEN_CNTL); ++ lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; ++ lvds_gen_cntl &= ~(RADEON_LVDS_ON | ++ RADEON_LVDS_BLON | ++ RADEON_LVDS_EN | ++ RADEON_LVDS_RST_FM); ++ ++ DRM_INFO("bios LVDS_GEN_CNTL: 0x%x\n", radeon_encoder->lvds_gen_cntl); ++ ++ if (radeon_is_r300(dev_priv)) ++ lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK); ++ ++ if (radeon_crtc->crtc_id == 0) { ++ if (radeon_is_r300(dev_priv)) { ++ if (radeon_encoder->flags & RADEON_USE_RMX) ++ lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; ++ } else ++ lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; ++ } else { ++ if (radeon_is_r300(dev_priv)) ++ lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2; ++ else ++ lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2; ++ } ++ ++ RADEON_WRITE(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); ++ RADEON_WRITE(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); ++ ++ lvds_ss_gen_cntl = RADEON_READ(RADEON_LVDS_SS_GEN_CNTL); ++ if (radeon_encoder->panel_digon_delay && ++ radeon_encoder->panel_blon_delay) { ++ lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) | ++ (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); ++ lvds_ss_gen_cntl |= ((radeon_encoder->panel_digon_delay << 
RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) | ++ (radeon_encoder->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); ++ RADEON_WRITE(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl); ++ } ++ ++ if (dev_priv->chip_family == CHIP_RV410) ++ RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, 0); ++} ++ ++static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ ++ radeon_encoder->flags &= ~RADEON_USE_RMX; ++ ++ if (radeon_encoder->rmx_type != RMX_OFF) ++ radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); ++ ++ return true; ++} ++ ++static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { ++ .dpms = radeon_legacy_lvds_dpms, ++ .mode_fixup = radeon_legacy_lvds_mode_fixup, ++ .prepare = radeon_legacy_lvds_prepare, ++ .mode_set = radeon_legacy_lvds_mode_set, ++ .commit = radeon_legacy_lvds_commit, ++}; ++ ++ ++static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { ++ .destroy = radeon_enc_destroy, ++}; ++ ++ ++struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder; ++ struct drm_encoder *encoder; ++ ++ DRM_DEBUG("\n"); ++ ++ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); ++ if (!radeon_encoder) { ++ return NULL; ++ } ++ ++ encoder = &radeon_encoder->base; ++ ++ /* Limit LVDS to crtc 0 for RMX */ ++ encoder->possible_crtcs = 0x1; ++ encoder->possible_clones = 0; ++ drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, ++ DRM_MODE_ENCODER_LVDS); ++ ++ drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); ++ ++ /* get the lvds info from the bios */ ++ if (dev_priv->is_atom_bios) ++ radeon_atombios_get_lvds_info(radeon_encoder); ++ else ++ radeon_combios_get_lvds_info(radeon_encoder); ++ ++ /* LVDS gets default RMX 
full scaling */ ++ radeon_encoder->rmx_type = RMX_FULL; ++ ++ return encoder; ++} ++ ++static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ return true; ++} ++ ++static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc; ++ uint32_t crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL); ++ uint32_t dac_cntl = RADEON_READ(RADEON_DAC_CNTL); ++ uint32_t dac_macro_cntl = RADEON_READ(RADEON_DAC_MACRO_CNTL); ++ uint32_t bios_5_scratch, bios_6_scratch; ++ int crtc_id = 0; ++ ++ DRM_DEBUG("\n"); ++ ++ if (encoder->crtc) { ++ radeon_crtc = to_radeon_crtc(encoder->crtc); ++ crtc_id = radeon_crtc->crtc_id; ++ } ++ ++ // FIXME atom/legacy cards like r4xx ++ bios_5_scratch = RADEON_READ(RADEON_BIOS_5_SCRATCH); ++ bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ ++ bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK; ++ bios_5_scratch |= (crtc_id << RADEON_CRT1_CRTC_SHIFT); ++ ++ DRM_DEBUG("\n"); ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ crtc_ext_cntl |= RADEON_CRTC_CRT_ON; ++ dac_cntl &= ~RADEON_DAC_PDWN; ++ dac_macro_cntl &= ~(RADEON_DAC_PDWN_R | ++ RADEON_DAC_PDWN_G | ++ RADEON_DAC_PDWN_B); ++ bios_5_scratch |= RADEON_CRT1_ON; ++ bios_6_scratch |= RADEON_CRT_DPMS_ON; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON; ++ dac_cntl |= RADEON_DAC_PDWN; ++ dac_macro_cntl |= (RADEON_DAC_PDWN_R | ++ RADEON_DAC_PDWN_G | ++ RADEON_DAC_PDWN_B); ++ bios_5_scratch &= ~RADEON_CRT1_ON; ++ bios_6_scratch &= ~RADEON_CRT_DPMS_ON; ++ break; ++ } ++ ++ RADEON_WRITE(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); ++ RADEON_WRITE(RADEON_DAC_CNTL, dac_cntl); ++ RADEON_WRITE(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); ++ ++ 
RADEON_WRITE(RADEON_BIOS_5_SCRATCH, bios_5_scratch); ++ RADEON_WRITE(RADEON_BIOS_6_SCRATCH, bios_6_scratch); ++} ++ ++static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) ++{ ++ struct drm_radeon_private *dev_priv = encoder->dev->dev_private; ++ // fix me: atom/legacy r4xx ++ if (!dev_priv->is_atom_bios) ++ radeon_combios_output_lock(encoder, true); ++ radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) ++{ ++ struct drm_radeon_private *dev_priv = encoder->dev->dev_private; ++ radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON); ++ // fix me: atom/legacy r4xx ++ if (!dev_priv->is_atom_bios) ++ radeon_combios_output_lock(encoder, false); ++} ++ ++static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl; ++ ++ DRM_DEBUG("\n"); ++ ++ if (radeon_crtc->crtc_id == 0) ++ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); ++ ++ if (radeon_crtc->crtc_id == 0) { ++ if (dev_priv->chip_family == CHIP_R200 || radeon_is_r300(dev_priv)) { ++ disp_output_cntl = RADEON_READ(RADEON_DISP_OUTPUT_CNTL) & ++ ~(RADEON_DISP_DAC_SOURCE_MASK); ++ RADEON_WRITE(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); ++ } else { ++ dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2) & ~(RADEON_DAC2_DAC_CLK_SEL); ++ RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); ++ } ++ } else { ++ if (dev_priv->chip_family == CHIP_R200 || radeon_is_r300(dev_priv)) { ++ disp_output_cntl = RADEON_READ(RADEON_DISP_OUTPUT_CNTL) & ++ ~(RADEON_DISP_DAC_SOURCE_MASK); ++ disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2; 
++ RADEON_WRITE(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); ++ } else { ++ dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL; ++ RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); ++ } ++ } ++ ++ dac_cntl = (RADEON_DAC_MASK_ALL | ++ RADEON_DAC_VGA_ADR_EN | ++ /* TODO 6-bits */ ++ RADEON_DAC_8BIT_EN); ++ ++ RADEON_WRITE_P(RADEON_DAC_CNTL, ++ dac_cntl, ++ RADEON_DAC_RANGE_CNTL | ++ RADEON_DAC_BLANKING); ++ ++ if (radeon_encoder->ps2_pdac_adj) ++ dac_macro_cntl = radeon_encoder->ps2_pdac_adj; ++ else ++ dac_macro_cntl = RADEON_READ(RADEON_DAC_MACRO_CNTL); ++ dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B; ++ RADEON_WRITE(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); ++} ++ ++static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder, ++ struct drm_connector *connector) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t vclk_ecp_cntl, crtc_ext_cntl; ++ uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp; ++ enum drm_connector_status found = connector_status_disconnected; ++ bool color = true; ++ ++ /* save the regs we need */ ++ vclk_ecp_cntl = RADEON_READ_PLL(dev_priv, RADEON_VCLK_ECP_CNTL); ++ crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL); ++ dac_ext_cntl = RADEON_READ(RADEON_DAC_EXT_CNTL); ++ dac_cntl = RADEON_READ(RADEON_DAC_CNTL); ++ dac_macro_cntl = RADEON_READ(RADEON_DAC_MACRO_CNTL); ++ ++ tmp = vclk_ecp_cntl & ++ ~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb); ++ RADEON_WRITE_PLL(dev_priv, RADEON_VCLK_ECP_CNTL, tmp); ++ ++ tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON; ++ RADEON_WRITE(RADEON_CRTC_EXT_CNTL, tmp); ++ ++ tmp = RADEON_DAC_FORCE_BLANK_OFF_EN | ++ RADEON_DAC_FORCE_DATA_EN; ++ ++ if (color) ++ tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB; ++ else ++ tmp |= RADEON_DAC_FORCE_DATA_SEL_G; ++ ++ if (radeon_is_r300(dev_priv)) ++ tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); ++ else ++ tmp |= (0x180 << 
RADEON_DAC_FORCE_DATA_SHIFT); ++ ++ RADEON_WRITE(RADEON_DAC_EXT_CNTL, tmp); ++ ++ tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN); ++ tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN; ++ RADEON_WRITE(RADEON_DAC_CNTL, tmp); ++ ++ tmp &= ~(RADEON_DAC_PDWN_R | ++ RADEON_DAC_PDWN_G | ++ RADEON_DAC_PDWN_B); ++ ++ RADEON_WRITE(RADEON_DAC_MACRO_CNTL, tmp); ++ ++ udelay(2000); ++ ++ if (RADEON_READ(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT) ++ found = connector_status_connected; ++ ++ /* restore the regs we used */ ++ RADEON_WRITE(RADEON_DAC_CNTL, dac_cntl); ++ RADEON_WRITE(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); ++ RADEON_WRITE(RADEON_DAC_EXT_CNTL, dac_ext_cntl); ++ RADEON_WRITE(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); ++ RADEON_WRITE_PLL(dev_priv, RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl); ++ ++ return found; ++} ++ ++static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { ++ .dpms = radeon_legacy_primary_dac_dpms, ++ .mode_fixup = radeon_legacy_primary_dac_mode_fixup, ++ .prepare = radeon_legacy_primary_dac_prepare, ++ .mode_set = radeon_legacy_primary_dac_mode_set, ++ .commit = radeon_legacy_primary_dac_commit, ++ .detect = radeon_legacy_primary_dac_detect, ++}; ++ ++ ++static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = { ++ .destroy = radeon_enc_destroy, ++}; ++ ++struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int has_tv) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder; ++ struct drm_encoder *encoder; ++ ++ DRM_DEBUG("\n"); ++ ++ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); ++ if (!radeon_encoder) { ++ return NULL; ++ } ++ ++ encoder = &radeon_encoder->base; ++ ++ encoder->possible_crtcs = 0x3; ++ encoder->possible_clones = 0; ++ drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, ++ DRM_MODE_ENCODER_DAC); ++ ++ drm_encoder_helper_add(encoder, 
&radeon_legacy_primary_dac_helper_funcs); ++ ++ /* get the primary dac bg/adj vals from bios tables */ ++ if (!dev_priv->is_atom_bios) ++ radeon_combios_get_primary_dac_info(radeon_encoder); ++ ++ return encoder; ++} ++ ++ ++static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ return true; ++} ++ ++static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc; ++ uint32_t fp_gen_cntl = RADEON_READ(RADEON_FP_GEN_CNTL); ++ uint32_t bios_5_scratch, bios_6_scratch; ++ int crtc_id = 0; ++ DRM_DEBUG("\n"); ++ ++ if (encoder->crtc) { ++ radeon_crtc = to_radeon_crtc(encoder->crtc); ++ crtc_id = radeon_crtc->crtc_id; ++ } ++ ++ // FIXME atom/legacy cards like r4xx ++ bios_5_scratch = RADEON_READ(RADEON_BIOS_5_SCRATCH); ++ bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ ++ bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK; ++ bios_5_scratch |= (crtc_id << RADEON_DFP1_CRTC_SHIFT); ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN); ++ bios_5_scratch |= RADEON_DFP1_ON; ++ bios_6_scratch |= RADEON_DFP_DPMS_ON; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); ++ bios_5_scratch &= ~RADEON_DFP1_ON; ++ bios_6_scratch &= ~RADEON_DFP_DPMS_ON; ++ break; ++ } ++ ++ RADEON_WRITE(RADEON_FP_GEN_CNTL, fp_gen_cntl); ++ ++ RADEON_WRITE(RADEON_BIOS_5_SCRATCH, bios_5_scratch); ++ RADEON_WRITE(RADEON_BIOS_6_SCRATCH, bios_6_scratch); ++} ++ ++static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) ++{ ++ struct drm_radeon_private *dev_priv = encoder->dev->dev_private; ++ // fix me: atom/legacy r4xx ++ if (!dev_priv->is_atom_bios) ++ radeon_combios_output_lock(encoder, 
true); ++ radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) ++{ ++ struct drm_radeon_private *dev_priv = encoder->dev->dev_private; ++ radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON); ++ // fix me: atom/legacy r4xx ++ if (!dev_priv->is_atom_bios) ++ radeon_combios_output_lock(encoder, true); ++} ++ ++static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl; ++ int i; ++ ++ DRM_DEBUG("\n"); ++ ++ if (radeon_crtc->crtc_id == 0) ++ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); ++ ++ tmp = tmds_pll_cntl = RADEON_READ(RADEON_TMDS_PLL_CNTL); ++ tmp &= 0xfffff; ++ if (dev_priv->chip_family == CHIP_RV280) { ++ /* bit 22 of TMDS_PLL_CNTL is read-back inverted */ ++ tmp ^= (1 << 22); ++ tmds_pll_cntl ^= (1 << 22); ++ } ++ ++ for (i = 0; i < 4; i++) { ++ if (radeon_encoder->tmds_pll[i].freq == 0) ++ break; ++ if ((uint32_t)(mode->clock / 10) < radeon_encoder->tmds_pll[i].freq) { ++ tmp = radeon_encoder->tmds_pll[i].value ; ++ break; ++ } ++ } ++ ++ if (radeon_is_r300(dev_priv) || (dev_priv->chip_family == CHIP_RV280)) { ++ if (tmp & 0xfff00000) ++ tmds_pll_cntl = tmp; ++ else { ++ tmds_pll_cntl &= 0xfff00000; ++ tmds_pll_cntl |= tmp; ++ } ++ } else ++ tmds_pll_cntl = tmp; ++ ++ tmds_transmitter_cntl = RADEON_READ(RADEON_TMDS_TRANSMITTER_CNTL) & ++ ~(RADEON_TMDS_TRANSMITTER_PLLRST); ++ ++ if (dev_priv->chip_family == CHIP_R200 || ++ dev_priv->chip_family == CHIP_R100 || ++ radeon_is_r300(dev_priv)) ++ tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN); ++ else /* RV 
chips got this bit reversed */ ++ tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN; ++ ++ fp_gen_cntl = (RADEON_READ(RADEON_FP_GEN_CNTL) | ++ (RADEON_FP_CRTC_DONT_SHADOW_VPAR | ++ RADEON_FP_CRTC_DONT_SHADOW_HEND)); ++ ++ fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); ++ ++ if (1) // FIXME rgbBits == 8 ++ fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ ++ else ++ fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */ ++ ++ if (radeon_crtc->crtc_id == 0) { ++ if (radeon_is_r300(dev_priv) || dev_priv->chip_family == CHIP_R200) { ++ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; ++ if (radeon_encoder->flags & RADEON_USE_RMX) ++ fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; ++ else ++ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; ++ } else ++ fp_gen_cntl |= RADEON_FP_SEL_CRTC1; ++ } else { ++ if (radeon_is_r300(dev_priv) || dev_priv->chip_family == CHIP_R200) { ++ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; ++ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2; ++ } else ++ fp_gen_cntl |= RADEON_FP_SEL_CRTC2; ++ } ++ ++ RADEON_WRITE(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl); ++ RADEON_WRITE(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl); ++ RADEON_WRITE(RADEON_FP_GEN_CNTL, fp_gen_cntl); ++} ++ ++static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { ++ .dpms = radeon_legacy_tmds_int_dpms, ++ .mode_fixup = radeon_legacy_tmds_int_mode_fixup, ++ .prepare = radeon_legacy_tmds_int_prepare, ++ .mode_set = radeon_legacy_tmds_int_mode_set, ++ .commit = radeon_legacy_tmds_int_commit, ++}; ++ ++ ++static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = { ++ .destroy = radeon_enc_destroy, ++}; ++ ++struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder; ++ struct drm_encoder *encoder; ++ ++ DRM_DEBUG("\n"); ++ ++ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); ++ if 
(!radeon_encoder) { ++ return NULL; ++ } ++ ++ encoder = &radeon_encoder->base; ++ ++ encoder->possible_crtcs = 0x3; ++ encoder->possible_clones = 0; ++ drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, ++ DRM_MODE_ENCODER_TMDS); ++ ++ drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs); ++ ++ if (dev_priv->is_atom_bios) ++ radeon_atombios_get_tmds_info(radeon_encoder); ++ else ++ radeon_combios_get_tmds_info(radeon_encoder); ++ ++ return encoder; ++} ++ ++static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ return true; ++} ++ ++static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc; ++ uint32_t fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); ++ uint32_t bios_5_scratch, bios_6_scratch; ++ int crtc_id = 0; ++ DRM_DEBUG("\n"); ++ ++ if (encoder->crtc) { ++ radeon_crtc = to_radeon_crtc(encoder->crtc); ++ crtc_id = radeon_crtc->crtc_id; ++ } ++ ++ // FIXME atom/legacy cards like r4xx ++ bios_5_scratch = RADEON_READ(RADEON_BIOS_5_SCRATCH); ++ bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ ++ bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK; ++ bios_5_scratch |= (crtc_id << RADEON_DFP2_CRTC_SHIFT); ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN; ++ fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); ++ bios_5_scratch |= RADEON_DFP2_ON; ++ bios_6_scratch |= RADEON_DFP_DPMS_ON; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ fp2_gen_cntl |= RADEON_FP2_BLANK_EN; ++ fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); ++ bios_5_scratch &= ~RADEON_DFP2_ON; ++ bios_6_scratch &= ~RADEON_DFP_DPMS_ON; ++ break; ++ } ++ ++ RADEON_WRITE(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); ++ ++ 
RADEON_WRITE(RADEON_BIOS_5_SCRATCH, bios_5_scratch); ++ RADEON_WRITE(RADEON_BIOS_6_SCRATCH, bios_6_scratch); ++} ++ ++static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) ++{ ++ struct drm_radeon_private *dev_priv = encoder->dev->dev_private; ++ // fix me: atom/legacy r4xx ++ if (!dev_priv->is_atom_bios) ++ radeon_combios_output_lock(encoder, true); ++ radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) ++{ ++ struct drm_radeon_private *dev_priv = encoder->dev->dev_private; ++ radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON); ++ // fix me: atom/legacy r4xx ++ if (!dev_priv->is_atom_bios) ++ radeon_combios_output_lock(encoder, false); ++} ++ ++static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ uint32_t fp2_gen_cntl; ++ ++ DRM_DEBUG("\n"); ++ ++ if (radeon_crtc->crtc_id == 0) ++ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); ++ ++ if (dev_priv->is_atom_bios) { ++ atombios_ext_tmds_setup(encoder, adjusted_mode); ++ fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); ++ } else { ++ fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); ++ ++ if (1) // FIXME rgbBits == 8 ++ fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */ ++ else ++ fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */ ++ ++ fp2_gen_cntl &= ~(RADEON_FP2_ON | ++ RADEON_FP2_DVO_EN | ++ RADEON_FP2_DVO_RATE_SEL_SDR); ++ ++ /* XXX: these are oem specific */ ++ if (radeon_is_r300(dev_priv)) { ++ if ((dev->pdev->device == 0x4850) && ++ (dev->pdev->subsystem_vendor == 0x1028) && ++ (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 
8600 */ ++ fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE; ++ else ++ fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE; ++ ++ /*if (mode->clock > 165000) ++ fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ ++ } ++ } ++ ++ if (radeon_crtc->crtc_id == 0) { ++ if ((dev_priv->chip_family == CHIP_R200) || radeon_is_r300(dev_priv)) { ++ fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; ++ if (radeon_encoder->flags & RADEON_USE_RMX) ++ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; ++ else ++ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; ++ } else ++ fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2; ++ } else { ++ if ((dev_priv->chip_family == CHIP_R200) || radeon_is_r300(dev_priv)) { ++ fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; ++ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; ++ } else ++ fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2; ++ } ++ ++ RADEON_WRITE(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); ++} ++ ++static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { ++ .dpms = radeon_legacy_tmds_ext_dpms, ++ .mode_fixup = radeon_legacy_tmds_ext_mode_fixup, ++ .prepare = radeon_legacy_tmds_ext_prepare, ++ .mode_set = radeon_legacy_tmds_ext_mode_set, ++ .commit = radeon_legacy_tmds_ext_commit, ++}; ++ ++ ++static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { ++ .destroy = radeon_enc_destroy, ++}; ++ ++struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_encoder *radeon_encoder; ++ struct drm_encoder *encoder; ++ ++ DRM_DEBUG("\n"); ++ ++ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); ++ if (!radeon_encoder) { ++ return NULL; ++ } ++ ++ encoder = &radeon_encoder->base; ++ ++ encoder->possible_crtcs = 0x3; ++ encoder->possible_clones = 0; ++ drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, ++ DRM_MODE_ENCODER_TMDS); ++ ++ drm_encoder_helper_add(encoder, 
&radeon_legacy_tmds_ext_helper_funcs); ++ ++ if (!dev_priv->is_atom_bios) ++ radeon_combios_get_ext_tmds_info(radeon_encoder); ++ return encoder; ++} ++ ++static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ return true; ++} ++ ++static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc; ++ uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0; ++ //uint32_t tv_master_cntl = 0; ++ uint32_t bios_5_scratch, bios_6_scratch; ++ int crtc_id = 0; ++ ++ DRM_DEBUG("\n"); ++ ++ if (encoder->crtc) { ++ radeon_crtc = to_radeon_crtc(encoder->crtc); ++ crtc_id = radeon_crtc->crtc_id; ++ } ++ ++ // FIXME atom/legacy cards like r4xx ++ bios_5_scratch = RADEON_READ(RADEON_BIOS_5_SCRATCH); ++ bios_6_scratch = RADEON_READ(RADEON_BIOS_6_SCRATCH); ++ ++ bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK; ++ bios_5_scratch |= (crtc_id << RADEON_CRT2_CRTC_SHIFT); ++ // FIXME TV ++ //bios_5_scratch &= ~RADEON_TV1_CRTC_MASK; ++ //bios_5_scratch |= (crtc_id << RADEON_TV1_CRTC_SHIFT); ++ ++ if (dev_priv->chip_family == CHIP_R200) ++ fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); ++ else { ++ crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL); ++ // FIXME TV ++ //tv_master_cntl = RADEON_READ(RADEON_TV_MASTER_CNTL); ++ tv_dac_cntl = RADEON_READ(RADEON_TV_DAC_CNTL); ++ } ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ if (dev_priv->chip_family == CHIP_R200) ++ fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); ++ else { ++ crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON; ++ //tv_master_cntl |= RADEON_TV_ON; ++ if (dev_priv->chip_family == CHIP_R420 || ++ dev_priv->chip_family == CHIP_R423 || ++ dev_priv->chip_family == CHIP_RV410) ++ tv_dac_cntl &= ~(R420_TV_DAC_RDACPD | ++ R420_TV_DAC_GDACPD | ++ R420_TV_DAC_BDACPD | ++ 
RADEON_TV_DAC_BGSLEEP); ++ else ++ tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD | ++ RADEON_TV_DAC_GDACPD | ++ RADEON_TV_DAC_BDACPD | ++ RADEON_TV_DAC_BGSLEEP); ++ } ++ //bios_5_scratch |= RADEON_TV1_ON; ++ //bios_6_scratch |= RADEON_TV_DPMS_ON; ++ bios_5_scratch |= RADEON_CRT2_ON; ++ bios_6_scratch |= RADEON_CRT_DPMS_ON; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ if (dev_priv->chip_family == CHIP_R200) ++ fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); ++ else { ++ crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; ++ //tv_master_cntl &= ~RADEON_TV_ON; ++ if (dev_priv->chip_family == CHIP_R420 || ++ dev_priv->chip_family == CHIP_R423 || ++ dev_priv->chip_family == CHIP_RV410) ++ tv_dac_cntl |= (R420_TV_DAC_RDACPD | ++ R420_TV_DAC_GDACPD | ++ R420_TV_DAC_BDACPD | ++ RADEON_TV_DAC_BGSLEEP); ++ else ++ tv_dac_cntl |= (RADEON_TV_DAC_RDACPD | ++ RADEON_TV_DAC_GDACPD | ++ RADEON_TV_DAC_BDACPD | ++ RADEON_TV_DAC_BGSLEEP); ++ } ++ //bios_5_scratch &= ~RADEON_TV1_ON; ++ //bios_6_scratch &= ~RADEON_TV_DPMS_ON; ++ bios_5_scratch &= ~RADEON_CRT2_ON; ++ bios_6_scratch &= ~RADEON_CRT_DPMS_ON; ++ break; ++ } ++ ++ if (dev_priv->chip_family == CHIP_R200) ++ RADEON_WRITE(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); ++ else { ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); ++ //RADEON_WRITE(RADEON_TV_MASTER_CNTL, tv_master_cntl); ++ RADEON_WRITE(RADEON_TV_DAC_CNTL, tv_dac_cntl); ++ } ++ ++ RADEON_WRITE(RADEON_BIOS_5_SCRATCH, bios_5_scratch); ++ RADEON_WRITE(RADEON_BIOS_6_SCRATCH, bios_6_scratch); ++} ++ ++static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) ++{ ++ // fix me: atom/legacy r4xx ++ radeon_combios_output_lock(encoder, true); ++ radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) ++{ ++ radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON); ++ // fix me: atom/legacy r4xx ++ radeon_combios_output_lock(encoder, false); ++} ++ 
++static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0; ++ uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0; ++ ++ DRM_DEBUG("\n"); ++ ++ if (radeon_crtc->crtc_id == 0) ++ radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); ++ ++ if (dev_priv->chip_family != CHIP_R200) { ++ tv_dac_cntl = RADEON_READ(RADEON_TV_DAC_CNTL); ++ if (dev_priv->chip_family == CHIP_R420 || ++ dev_priv->chip_family == CHIP_R423 || ++ dev_priv->chip_family == CHIP_RV410) { ++ tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | ++ RADEON_TV_DAC_BGADJ_MASK | ++ R420_TV_DAC_DACADJ_MASK | ++ R420_TV_DAC_RDACPD | ++ R420_TV_DAC_GDACPD | ++ R420_TV_DAC_GDACPD | ++ R420_TV_DAC_TVENABLE); ++ } else { ++ tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | ++ RADEON_TV_DAC_BGADJ_MASK | ++ RADEON_TV_DAC_DACADJ_MASK | ++ RADEON_TV_DAC_RDACPD | ++ RADEON_TV_DAC_GDACPD | ++ RADEON_TV_DAC_GDACPD); ++ } ++ ++ // FIXME TV ++ tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | ++ RADEON_TV_DAC_NHOLD | ++ RADEON_TV_DAC_STD_PS2 | ++ radeon_encoder->ps2_tvdac_adj); ++ ++ RADEON_WRITE(RADEON_TV_DAC_CNTL, tv_dac_cntl); ++ } ++ ++ if (radeon_is_r300(dev_priv)) { ++ gpiopad_a = RADEON_READ(RADEON_GPIOPAD_A) | 1; ++ disp_output_cntl = RADEON_READ(RADEON_DISP_OUTPUT_CNTL); ++ } else if (dev_priv->chip_family == CHIP_R200) ++ fp2_gen_cntl = RADEON_READ(RADEON_FP2_GEN_CNTL); ++ else ++ disp_hw_debug = RADEON_READ(RADEON_DISP_HW_DEBUG); ++ ++ dac2_cntl = RADEON_READ(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL; ++ ++ if (radeon_crtc->crtc_id == 0) { ++ if (radeon_is_r300(dev_priv)) { ++ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; ++ 
disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC; ++ } else if (dev_priv->chip_family == CHIP_R200) { ++ fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | ++ RADEON_FP2_DVO_RATE_SEL_SDR); ++ } else ++ disp_hw_debug |= RADEON_CRT2_DISP1_SEL; ++ } else { ++ if (radeon_is_r300(dev_priv)) { ++ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; ++ disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2; ++ } else if (dev_priv->chip_family == CHIP_R200) { ++ fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK | ++ RADEON_FP2_DVO_RATE_SEL_SDR); ++ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2; ++ } else ++ disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; ++ } ++ ++ RADEON_WRITE(RADEON_DAC_CNTL2, dac2_cntl); ++ ++ if (radeon_is_r300(dev_priv)) { ++ RADEON_WRITE_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); ++ RADEON_WRITE(RADEON_DISP_TV_OUT_CNTL, disp_output_cntl); ++ } else if (dev_priv->chip_family == CHIP_R200) ++ RADEON_WRITE(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); ++ else ++ RADEON_WRITE(RADEON_DISP_HW_DEBUG, disp_hw_debug); ++ ++} ++ ++static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder, ++ struct drm_connector *connector) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl; ++ uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp; ++ enum drm_connector_status found = connector_status_disconnected; ++ bool color = true; ++ ++ // FIXME tv ++ ++ /* save the regs we need */ ++ pixclks_cntl = RADEON_READ_PLL(dev_priv, RADEON_PIXCLKS_CNTL); ++ gpiopad_a = radeon_is_r300(dev_priv) ? RADEON_READ(RADEON_GPIOPAD_A) : 0; ++ disp_output_cntl = radeon_is_r300(dev_priv) ? RADEON_READ(RADEON_DISP_OUTPUT_CNTL) : 0; ++ disp_hw_debug = radeon_is_r300(dev_priv) ? 
0 : RADEON_READ(RADEON_DISP_HW_DEBUG); ++ crtc2_gen_cntl = RADEON_READ(RADEON_CRTC2_GEN_CNTL); ++ tv_dac_cntl = RADEON_READ(RADEON_TV_DAC_CNTL); ++ dac_ext_cntl = RADEON_READ(RADEON_DAC_EXT_CNTL); ++ dac_cntl2 = RADEON_READ(RADEON_DAC_CNTL2); ++ ++ tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb ++ | RADEON_PIX2CLK_DAC_ALWAYS_ONb); ++ RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, tmp); ++ ++ if (radeon_is_r300(dev_priv)) ++ RADEON_WRITE_P(RADEON_GPIOPAD_A, 1, ~1); ++ ++ tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK; ++ tmp |= RADEON_CRTC2_CRT2_ON | ++ (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT); ++ ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, tmp); ++ ++ if (radeon_is_r300(dev_priv)) { ++ tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK; ++ tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2; ++ RADEON_WRITE(RADEON_DISP_OUTPUT_CNTL, tmp); ++ } else { ++ tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL; ++ RADEON_WRITE(RADEON_DISP_HW_DEBUG, tmp); ++ } ++ ++ tmp = RADEON_TV_DAC_NBLANK | ++ RADEON_TV_DAC_NHOLD | ++ RADEON_TV_MONITOR_DETECT_EN | ++ RADEON_TV_DAC_STD_PS2; ++ ++ RADEON_WRITE(RADEON_TV_DAC_CNTL, tmp); ++ ++ tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN | ++ RADEON_DAC2_FORCE_DATA_EN; ++ ++ if (color) ++ tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB; ++ else ++ tmp |= RADEON_DAC_FORCE_DATA_SEL_G; ++ ++ if (radeon_is_r300(dev_priv)) ++ tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); ++ else ++ tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); ++ ++ RADEON_WRITE(RADEON_DAC_EXT_CNTL, tmp); ++ ++ tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN; ++ RADEON_WRITE(RADEON_DAC_CNTL2, tmp); ++ ++ udelay(10000); ++ ++ if (radeon_is_r300(dev_priv)) { ++ if (RADEON_READ(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B) ++ found = connector_status_connected; ++ } else { ++ if (RADEON_READ(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT) ++ found = connector_status_connected; ++ } ++ ++ /* restore regs we used */ ++ RADEON_WRITE(RADEON_DAC_CNTL2, dac_cntl2); ++ RADEON_WRITE(RADEON_DAC_EXT_CNTL, 
dac_ext_cntl); ++ RADEON_WRITE(RADEON_TV_DAC_CNTL, tv_dac_cntl); ++ RADEON_WRITE(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); ++ ++ if (radeon_is_r300(dev_priv)) { ++ RADEON_WRITE(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); ++ RADEON_WRITE_P(RADEON_GPIOPAD_A, gpiopad_a, ~1 ); ++ } else { ++ RADEON_WRITE(RADEON_DISP_HW_DEBUG, disp_hw_debug); ++ } ++ RADEON_WRITE_PLL(dev_priv, RADEON_PIXCLKS_CNTL, pixclks_cntl); ++ ++ //return found; ++ return connector_status_disconnected; ++ ++} ++ ++static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { ++ .dpms = radeon_legacy_tv_dac_dpms, ++ .mode_fixup = radeon_legacy_tv_dac_mode_fixup, ++ .prepare = radeon_legacy_tv_dac_prepare, ++ .mode_set = radeon_legacy_tv_dac_mode_set, ++ .commit = radeon_legacy_tv_dac_commit, ++ .detect = radeon_legacy_tv_dac_detect, ++}; ++ ++ ++static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = { ++ .destroy = radeon_enc_destroy, ++}; ++ ++struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int has_tv) ++{ ++ struct radeon_encoder *radeon_encoder; ++ struct drm_encoder *encoder; ++ ++ DRM_DEBUG("\n"); ++ ++ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); ++ if (!radeon_encoder) { ++ return NULL; ++ } ++ ++ encoder = &radeon_encoder->base; ++ ++ encoder->possible_crtcs = 0x3; ++ encoder->possible_clones = 0; ++ drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, ++ DRM_MODE_ENCODER_DAC); ++ ++ drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs); ++ ++ /* get the tv dac vals from bios tables */ ++ radeon_combios_get_tv_info(radeon_encoder); ++ radeon_combios_get_tv_dac_info(radeon_encoder); ++ ++ return encoder; ++} +diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c +index 4af5286..40134c8 100644 +--- a/drivers/gpu/drm/radeon/radeon_mem.c ++++ b/drivers/gpu/drm/radeon/radeon_mem.c +@@ -294,7 +294,7 @@ int radeon_mem_init_heap(struct 
drm_device *dev, void *data, struct drm_file *fi + return -EFAULT; + + if (*heap) { +- DRM_ERROR("heap already initialized?"); ++ DRM_DEBUG("heap already initialized?\n"); + return -EFAULT; + } + +diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h +new file mode 100644 +index 0000000..9ba4688 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_mode.h +@@ -0,0 +1,351 @@ ++/* ++ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and ++ * VA Linux Systems Inc., Fremont, California. ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Original Authors: ++ * Kevin E. Martin, Rickard E. 
Faith, Alan Hourihane ++ * ++ * Kernel port Author: Dave Airlie ++ */ ++ ++#ifndef RADEON_MODE_H ++#define RADEON_MODE_H ++ ++#include ++#include ++#include ++ ++#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) ++#define to_radeon_connector(x) container_of(x, struct radeon_connector, base) ++#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) ++#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) ++ ++enum radeon_connector_type { ++ CONNECTOR_NONE, ++ CONNECTOR_VGA, ++ CONNECTOR_DVI_I, ++ CONNECTOR_DVI_D, ++ CONNECTOR_DVI_A, ++ CONNECTOR_STV, ++ CONNECTOR_CTV, ++ CONNECTOR_LVDS, ++ CONNECTOR_DIGITAL, ++ CONNECTOR_SCART, ++ CONNECTOR_HDMI_TYPE_A, ++ CONNECTOR_HDMI_TYPE_B, ++ CONNECTOR_0XC, ++ CONNECTOR_0XD, ++ CONNECTOR_DIN, ++ CONNECTOR_DISPLAY_PORT, ++ CONNECTOR_UNSUPPORTED ++}; ++ ++enum radeon_dac_type { ++ DAC_NONE = 0, ++ DAC_PRIMARY = 1, ++ DAC_TVDAC = 2, ++ DAC_EXT = 3 ++}; ++ ++enum radeon_tmds_type { ++ TMDS_NONE = 0, ++ TMDS_INT = 1, ++ TMDS_EXT = 2, ++ TMDS_LVTMA = 3, ++ TMDS_DDIA = 4, ++ TMDS_UNIPHY = 5 ++}; ++ ++enum radeon_dvi_type { ++ DVI_AUTO, ++ DVI_DIGITAL, ++ DVI_ANALOG ++}; ++ ++enum radeon_rmx_type { ++ RMX_OFF, ++ RMX_FULL, ++ RMX_CENTER, ++}; ++ ++enum radeon_tv_std { ++ TV_STD_NTSC, ++ TV_STD_PAL, ++ TV_STD_PAL_M, ++ TV_STD_PAL_60, ++ TV_STD_NTSC_J, ++ TV_STD_SCART_PAL, ++ TV_STD_SECAM, ++ TV_STD_PAL_CN, ++}; ++ ++struct radeon_i2c_bus_rec { ++ bool valid; ++ uint32_t mask_clk_reg; ++ uint32_t mask_data_reg; ++ uint32_t a_clk_reg; ++ uint32_t a_data_reg; ++ uint32_t put_clk_reg; ++ uint32_t put_data_reg; ++ uint32_t get_clk_reg; ++ uint32_t get_data_reg; ++ uint32_t mask_clk_mask; ++ uint32_t mask_data_mask; ++ uint32_t put_clk_mask; ++ uint32_t put_data_mask; ++ uint32_t get_clk_mask; ++ uint32_t get_data_mask; ++ uint32_t a_clk_mask; ++ uint32_t a_data_mask; ++}; ++ ++struct radeon_bios_connector { ++ enum radeon_dac_type dac_type; ++ enum radeon_tmds_type tmds_type; ++ 
enum radeon_connector_type connector_type; ++ bool valid; ++ int output_id; ++ int devices; ++ int hpd_mask; ++ struct radeon_i2c_bus_rec ddc_i2c; ++ int igp_lane_info; ++}; ++ ++struct radeon_tmds_pll { ++ uint32_t freq; ++ uint32_t value; ++}; ++ ++#define RADEON_MAX_BIOS_CONNECTOR 16 ++ ++#define RADEON_PLL_USE_BIOS_DIVS (1 << 0) ++#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) ++#define RADEON_PLL_USE_REF_DIV (1 << 2) ++#define RADEON_PLL_LEGACY (1 << 3) ++#define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4) ++#define RADEON_PLL_PREFER_HIGH_REF_DIV (1 << 5) ++#define RADEON_PLL_PREFER_LOW_FB_DIV (1 << 6) ++#define RADEON_PLL_PREFER_HIGH_FB_DIV (1 << 7) ++#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) ++#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) ++ ++struct radeon_pll { ++ uint16_t reference_freq; ++ uint16_t reference_div; ++ uint32_t pll_in_min; ++ uint32_t pll_in_max; ++ uint32_t pll_out_min; ++ uint32_t pll_out_max; ++ uint16_t xclk; ++ ++ uint32_t min_ref_div; ++ uint32_t max_ref_div; ++ uint32_t min_post_div; ++ uint32_t max_post_div; ++ uint32_t min_feedback_div; ++ uint32_t max_feedback_div; ++ uint32_t best_vco; ++}; ++ ++struct radeon_i2c_chan { ++ struct drm_device *dev; ++ struct i2c_adapter adapter; ++ struct i2c_algo_bit_data algo; ++ struct radeon_i2c_bus_rec rec; ++}; ++ ++struct radeon_mode_info { ++ struct atom_context *atom_context; ++ struct radeon_bios_connector bios_connector[RADEON_MAX_BIOS_CONNECTOR]; ++ struct radeon_pll p1pll; ++ struct radeon_pll p2pll; ++ struct radeon_pll spll; ++ struct radeon_pll mpll; ++ uint32_t mclk; ++ uint32_t sclk; ++}; ++ ++struct radeon_crtc { ++ struct drm_crtc base; ++ int crtc_id; ++ u8 lut_r[256], lut_g[256], lut_b[256]; ++ bool enabled; ++ bool can_tile; ++ uint32_t crtc_offset; ++ struct radeon_framebuffer *fbdev_fb; ++ struct drm_mode_set mode_set; ++}; ++ ++#define RADEON_USE_RMX 1 ++ ++struct radeon_encoder { ++ struct drm_encoder base; ++ uint32_t encoder_mode; ++ uint32_t flags; ++ enum 
radeon_rmx_type rmx_type; ++ union { ++ enum radeon_dac_type dac; ++ enum radeon_tmds_type tmds; ++ } type; ++ int atom_device; /* atom devices */ ++ ++ /* preferred mode */ ++ uint32_t panel_xres, panel_yres; ++ uint32_t hoverplus, hsync_width; ++ uint32_t hblank; ++ uint32_t voverplus, vsync_width; ++ uint32_t vblank; ++ uint32_t dotclock; ++ ++ /* legacy lvds */ ++ uint16_t panel_vcc_delay; ++ uint16_t panel_pwr_delay; ++ uint16_t panel_digon_delay; ++ uint16_t panel_blon_delay; ++ uint32_t panel_ref_divider; ++ uint32_t panel_post_divider; ++ uint32_t panel_fb_divider; ++ bool use_bios_dividers; ++ uint32_t lvds_gen_cntl; ++ ++ /* legacy primary dac */ ++ uint32_t ps2_pdac_adj; ++ ++ /* legacy tv dac */ ++ uint32_t ps2_tvdac_adj; ++ uint32_t ntsc_tvdac_adj; ++ uint32_t pal_tvdac_adj; ++ enum radeon_tv_std tv_std; ++ ++ /* legacy int tmds */ ++ struct radeon_tmds_pll tmds_pll[4]; ++}; ++ ++struct radeon_connector { ++ struct drm_connector base; ++ struct radeon_i2c_chan *ddc_bus; ++ int use_digital; ++}; ++ ++struct radeon_framebuffer { ++ struct drm_framebuffer base; ++ struct drm_bo_kmap_obj kmap_obj; ++ struct drm_gem_object *obj; ++}; ++ ++extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, ++ struct radeon_i2c_bus_rec *rec, ++ const char *name); ++extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); ++extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); ++extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); ++extern struct drm_connector *radeon_connector_add(struct drm_device *dev, int bios_index); ++ ++extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); ++ ++extern void radeon_compute_pll(struct radeon_pll *pll, ++ uint64_t freq, ++ uint32_t *dot_clock_p, ++ uint32_t *fb_div_p, ++ uint32_t *ref_div_p, ++ uint32_t *post_div_p, ++ int flags); ++ ++struct drm_encoder *radeon_encoder_lvtma_add(struct drm_device *dev, int bios_index); ++struct drm_encoder 
*radeon_encoder_atom_dac_add(struct drm_device *dev, int bios_index, int dac_id, int with_tv); ++struct drm_encoder *radeon_encoder_atom_tmds_add(struct drm_device *dev, int bios_index, int tmds_type); ++struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); ++struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); ++struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); ++struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); ++struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); ++extern void atombios_ext_tmds_setup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode); ++ ++extern void radeon_crtc_load_lut(struct drm_crtc *crtc); ++extern void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y); ++extern void atombios_crtc_mode_set(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode, ++ int x, int y); ++extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode); ++ ++extern void radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y); ++extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc); ++ ++extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, ++ struct drm_file *file_priv, ++ uint32_t handle, ++ uint32_t width, ++ uint32_t height); ++extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, ++ int x, int y); ++ ++extern bool radeon_atom_get_clock_info(struct drm_device *dev); ++extern bool radeon_combios_get_clock_info(struct drm_device *dev); ++extern void radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); ++extern void radeon_atombios_get_tmds_info(struct radeon_encoder *encoder); ++extern bool radeon_combios_get_lvds_info(struct radeon_encoder *encoder); ++extern bool radeon_combios_get_tmds_info(struct radeon_encoder *encoder); 
++extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder); ++extern bool radeon_combios_get_tv_info(struct radeon_encoder *encoder); ++extern bool radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); ++extern bool radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder); ++extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); ++extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); ++extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); ++extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev); ++extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, ++ u16 blue, int regno); ++struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, ++ struct drm_mode_fb_cmd *mode_cmd, ++ struct drm_gem_object *obj); ++ ++int radeonfb_probe(struct drm_device *dev); ++ ++int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); ++bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev); ++void radeon_atombios_init_crtc(struct drm_device *dev, ++ struct radeon_crtc *radeon_crtc); ++void radeon_legacy_init_crtc(struct drm_device *dev, ++ struct radeon_crtc *radeon_crtc); ++void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state); ++ ++void radeon_atom_static_pwrmgt_setup(struct drm_device *dev, int enable); ++void radeon_atom_dyn_clk_setup(struct drm_device *dev, int enable); ++void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable); ++void radeon_get_clock_info(struct drm_device *dev); ++extern bool radeon_get_atom_connector_info_from_bios_connector_table(struct drm_device *dev); ++ ++void radeon_rmx_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode); ++void radeon_enc_destroy(struct drm_encoder *encoder); ++void radeon_emit_copy_blit(struct drm_device * dev, ++ uint32_t 
src_offset, ++ uint32_t dst_offset, ++ uint32_t pages); ++void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); ++void radeon_combios_asic_init(struct drm_device *dev); ++extern int radeon_static_clocks_init(struct drm_device *dev); ++ ++#endif +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c +new file mode 100644 +index 0000000..af348ae +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_pm.c +@@ -0,0 +1,248 @@ ++/* ++ * Copyright 2007-8 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ */ ++#include "drmP.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++#include "atom.h" ++ ++#include "drm_crtc_helper.h" ++ ++int radeon_suspend(struct drm_device *dev, pm_message_t state) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct drm_framebuffer *fb; ++ int i; ++ ++ if (!dev || !dev_priv) { ++ return -ENODEV; ++ } ++ ++ if (state.event == PM_EVENT_PRETHAW) ++ return 0; ++ ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ ++ /* unpin the front buffers */ ++ list_for_each_entry(fb, &dev->mode_config.fb_kernel_list, filp_head) { ++ struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); ++ ++ if (!radeon_fb) ++ continue; ++ ++ if (!radeon_fb->obj); ++ continue; ++ ++ radeon_gem_object_unpin(radeon_fb->obj); ++ } ++ ++ if (!(dev_priv->flags & RADEON_IS_IGP)) ++ drm_bo_evict_mm(dev, DRM_BO_MEM_VRAM, 0); ++ ++ dev_priv->pmregs.crtc_ext_cntl = RADEON_READ(RADEON_CRTC_EXT_CNTL); ++ for (i = 0; i < 8; i++) ++ dev_priv->pmregs.bios_scratch[i] = RADEON_READ(RADEON_BIOS_0_SCRATCH + (i * 4)); ++ ++ radeon_modeset_cp_suspend(dev); ++ ++ /* Disable *all* interrupts */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, 0); ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); ++ ++ if (dev_priv->flags & RADEON_IS_PCIE) { ++ memcpy_fromio(dev_priv->mm.pcie_table_backup, dev_priv->mm.pcie_table.kmap.virtual, dev_priv->gart_info.table_size); ++ } ++ ++ pci_save_state(dev->pdev); ++ ++ if (state.event == PM_EVENT_SUSPEND) { ++ /* Shut down the device */ ++ pci_disable_device(dev->pdev); ++ pci_set_power_state(dev->pdev, PCI_D3hot); ++ } ++ return 0; ++} ++ ++int radeon_resume(struct drm_device *dev) ++{ ++ struct drm_radeon_private *dev_priv = dev->dev_private; ++ struct drm_framebuffer *fb; ++ int i; ++ ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ ++ pci_set_power_state(dev->pdev, PCI_D0); ++ 
pci_restore_state(dev->pdev); ++ if (pci_enable_device(dev->pdev)) ++ return -1; ++ ++ /* Turn on bus mastering -todo fix properly */ ++ radeon_enable_bm(dev_priv); ++ ++ DRM_ERROR("\n"); ++ /* on atom cards re init the whole card ++ and set the modes again */ ++ ++ if (dev_priv->is_atom_bios) { ++ struct atom_context *ctx = dev_priv->mode_info.atom_context; ++ atom_asic_init(ctx); ++ } else { ++ radeon_combios_asic_init(dev); ++ } ++ ++ pci_set_master(dev->pdev); ++ ++ for (i = 0; i < 8; i++) ++ RADEON_WRITE(RADEON_BIOS_0_SCRATCH + (i * 4), dev_priv->pmregs.bios_scratch[i]); ++ ++ /* VGA render mayhaps */ ++ if (dev_priv->chip_family >= CHIP_RS600) { ++ uint32_t tmp; ++ ++ RADEON_WRITE(AVIVO_D1VGA_CONTROL, 0); ++ RADEON_WRITE(AVIVO_D2VGA_CONTROL, 0); ++ tmp = RADEON_READ(0x300); ++ tmp &= ~(3 << 16); ++ RADEON_WRITE(0x300, tmp); ++ RADEON_WRITE(0x308, (1 << 8)); ++ RADEON_WRITE(0x310, dev_priv->fb_location); ++ RADEON_WRITE(0x594, 0); ++ } ++ ++ RADEON_WRITE(RADEON_CRTC_EXT_CNTL, dev_priv->pmregs.crtc_ext_cntl); ++ ++ radeon_static_clocks_init(dev); ++ ++ radeon_init_memory_map(dev); ++ ++ if (dev_priv->flags & RADEON_IS_PCIE) { ++ memcpy_toio(dev_priv->mm.pcie_table.kmap.virtual, dev_priv->mm.pcie_table_backup, dev_priv->gart_info.table_size); ++ } ++ ++ if (dev_priv->mm.ring.kmap.virtual) ++ memset(dev_priv->mm.ring.kmap.virtual, 0, RADEON_DEFAULT_RING_SIZE); ++ ++ if (dev_priv->mm.ring_read.kmap.virtual) ++ memset(dev_priv->mm.ring_read.kmap.virtual, 0, PAGE_SIZE); ++ ++ radeon_modeset_cp_resume(dev); ++ ++ /* reset swi reg */ ++ RADEON_WRITE(RADEON_LAST_SWI_REG, dev_priv->counter); ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); ++ ++ /* reset the context for userspace */ ++ if (dev->primary->master) { ++ struct drm_radeon_master_private *master_priv = dev->primary->master->driver_priv; ++ if 
(master_priv->sarea_priv) ++ master_priv->sarea_priv->ctx_owner = 0; ++ } ++ ++ /* pin the front buffers */ ++ list_for_each_entry(fb, &dev->mode_config.fb_kernel_list, filp_head) { ++ ++ struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); ++ ++ if (!radeon_fb) ++ continue; ++ ++ if (!radeon_fb->obj) ++ continue; ++ ++ radeon_gem_object_pin(radeon_fb->obj, ++ PAGE_SIZE, RADEON_GEM_DOMAIN_VRAM); ++ } ++ /* blat the mode back in */ ++ drm_helper_resume_force_mode(dev); ++ ++ return 0; ++} ++ ++bool radeon_set_pcie_lanes(struct drm_device *dev, int lanes) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ uint32_t link_width_cntl, mask; ++ ++ /* FIXME wait for idle */ ++ ++ ++ switch (lanes) { ++ case 0: ++ mask = RADEON_PCIE_LC_LINK_WIDTH_X0; ++ break; ++ case 1: ++ mask = RADEON_PCIE_LC_LINK_WIDTH_X1; ++ break; ++ case 2: ++ mask = RADEON_PCIE_LC_LINK_WIDTH_X2; ++ break; ++ case 4: ++ mask = RADEON_PCIE_LC_LINK_WIDTH_X4; ++ break; ++ case 8: ++ mask = RADEON_PCIE_LC_LINK_WIDTH_X8; ++ break; ++ case 12: ++ mask = RADEON_PCIE_LC_LINK_WIDTH_X12; ++ break; ++ case 16: ++ default: ++ mask = RADEON_PCIE_LC_LINK_WIDTH_X16; ++ break; ++ } ++ ++ link_width_cntl = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_LC_LINK_WIDTH_CNTL); ++ ++ if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == ++ (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) ++ return true; ++ ++ link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | ++ RADEON_PCIE_LC_RECONFIG_NOW | ++ RADEON_PCIE_LC_RECONFIG_LATER | ++ RADEON_PCIE_LC_SHORT_RECONFIG_EN); ++ link_width_cntl |= mask; ++ RADEON_WRITE_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); ++ RADEON_WRITE_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl | RADEON_PCIE_LC_RECONFIG_NOW); ++ ++ /* wait for lane set to complete */ ++ link_width_cntl = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_LC_LINK_WIDTH_CNTL); ++ while (link_width_cntl == 0xffffffff) ++ link_width_cntl = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_LC_LINK_WIDTH_CNTL); 
++ ++ if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == ++ (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) ++ return true; ++ else ++ return false; ++} ++ +diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h +new file mode 100644 +index 0000000..3341d38 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/radeon_reg.h +@@ -0,0 +1,5343 @@ ++/* ++ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and ++ * VA Linux Systems Inc., Fremont, California. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation on the rights to use, copy, modify, merge, ++ * publish, distribute, sublicense, and/or sell copies of the Software, ++ * and to permit persons to whom the Software is furnished to do so, ++ * subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR ++ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++/* ++ * Authors: ++ * Kevin E. Martin ++ * Rickard E. Faith ++ * Alan Hourihane ++ * ++ * References: ++ * ++ * !!!! FIXME !!!! ++ * RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical ++ * Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April ++ * 1999. 
++ * ++ * !!!! FIXME !!!! ++ * RAGE 128 Software Development Manual (Technical Reference Manual P/N ++ * SDK-G04000 Rev. 0.01), ATI Technologies: June 1999. ++ * ++ */ ++ ++/* !!!! FIXME !!!! NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h ++ * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT ++ * ON THE RADEON. A FULL AUDIT OF THIS CODE IS NEEDED! */ ++ ++#ifndef _RADEON_REG_H_ ++#define _RADEON_REG_H_ ++ ++#define ATI_DATATYPE_VQ 0 ++#define ATI_DATATYPE_CI4 1 ++#define ATI_DATATYPE_CI8 2 ++#define ATI_DATATYPE_ARGB1555 3 ++#define ATI_DATATYPE_RGB565 4 ++#define ATI_DATATYPE_RGB888 5 ++#define ATI_DATATYPE_ARGB8888 6 ++#define ATI_DATATYPE_RGB332 7 ++#define ATI_DATATYPE_Y8 8 ++#define ATI_DATATYPE_RGB8 9 ++#define ATI_DATATYPE_CI16 10 ++#define ATI_DATATYPE_VYUY_422 11 ++#define ATI_DATATYPE_YVYU_422 12 ++#define ATI_DATATYPE_AYUV_444 14 ++#define ATI_DATATYPE_ARGB4444 15 ++ ++ /* Registers for 2D/Video/Overlay */ ++#define RADEON_ADAPTER_ID 0x0f2c /* PCI */ ++#define RADEON_AGP_BASE 0x0170 ++#define RADEON_AGP_CNTL 0x0174 ++# define RADEON_AGP_APER_SIZE_256MB (0x00 << 0) ++# define RADEON_AGP_APER_SIZE_128MB (0x20 << 0) ++# define RADEON_AGP_APER_SIZE_64MB (0x30 << 0) ++# define RADEON_AGP_APER_SIZE_32MB (0x38 << 0) ++# define RADEON_AGP_APER_SIZE_16MB (0x3c << 0) ++# define RADEON_AGP_APER_SIZE_8MB (0x3e << 0) ++# define RADEON_AGP_APER_SIZE_4MB (0x3f << 0) ++# define RADEON_AGP_APER_SIZE_MASK (0x3f << 0) ++#define RADEON_STATUS_PCI_CONFIG 0x06 ++# define RADEON_CAP_LIST 0x100000 ++#define RADEON_CAPABILITIES_PTR_PCI_CONFIG 0x34 /* offset in PCI config*/ ++# define RADEON_CAP_PTR_MASK 0xfc /* mask off reserved bits of CAP_PTR */ ++# define RADEON_CAP_ID_NULL 0x00 /* End of capability list */ ++# define RADEON_CAP_ID_AGP 0x02 /* AGP capability ID */ ++# define RADEON_CAP_ID_EXP 0x10 /* PCI Express */ ++#define RADEON_AGP_COMMAND 0x0f60 /* PCI */ ++#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config*/ ++# define 
RADEON_AGP_ENABLE (1<<8) ++#define RADEON_AGP_PLL_CNTL 0x000b /* PLL */ ++#define RADEON_AGP_STATUS 0x0f5c /* PCI */ ++# define RADEON_AGP_1X_MODE 0x01 ++# define RADEON_AGP_2X_MODE 0x02 ++# define RADEON_AGP_4X_MODE 0x04 ++# define RADEON_AGP_FW_MODE 0x10 ++# define RADEON_AGP_MODE_MASK 0x17 ++# define RADEON_AGPv3_MODE 0x08 ++# define RADEON_AGPv3_4X_MODE 0x01 ++# define RADEON_AGPv3_8X_MODE 0x02 ++#define RADEON_ATTRDR 0x03c1 /* VGA */ ++#define RADEON_ATTRDW 0x03c0 /* VGA */ ++#define RADEON_ATTRX 0x03c0 /* VGA */ ++#define RADEON_AUX_SC_CNTL 0x1660 ++# define RADEON_AUX1_SC_EN (1 << 0) ++# define RADEON_AUX1_SC_MODE_OR (0 << 1) ++# define RADEON_AUX1_SC_MODE_NAND (1 << 1) ++# define RADEON_AUX2_SC_EN (1 << 2) ++# define RADEON_AUX2_SC_MODE_OR (0 << 3) ++# define RADEON_AUX2_SC_MODE_NAND (1 << 3) ++# define RADEON_AUX3_SC_EN (1 << 4) ++# define RADEON_AUX3_SC_MODE_OR (0 << 5) ++# define RADEON_AUX3_SC_MODE_NAND (1 << 5) ++#define RADEON_AUX1_SC_BOTTOM 0x1670 ++#define RADEON_AUX1_SC_LEFT 0x1664 ++#define RADEON_AUX1_SC_RIGHT 0x1668 ++#define RADEON_AUX1_SC_TOP 0x166c ++#define RADEON_AUX2_SC_BOTTOM 0x1680 ++#define RADEON_AUX2_SC_LEFT 0x1674 ++#define RADEON_AUX2_SC_RIGHT 0x1678 ++#define RADEON_AUX2_SC_TOP 0x167c ++#define RADEON_AUX3_SC_BOTTOM 0x1690 ++#define RADEON_AUX3_SC_LEFT 0x1684 ++#define RADEON_AUX3_SC_RIGHT 0x1688 ++#define RADEON_AUX3_SC_TOP 0x168c ++#define RADEON_AUX_WINDOW_HORZ_CNTL 0x02d8 ++#define RADEON_AUX_WINDOW_VERT_CNTL 0x02dc ++ ++#define RADEON_BASE_CODE 0x0f0b ++#define RADEON_BIOS_0_SCRATCH 0x0010 ++# define RADEON_FP_PANEL_SCALABLE (1 << 16) ++# define RADEON_FP_PANEL_SCALE_EN (1 << 17) ++# define RADEON_FP_CHIP_SCALE_EN (1 << 18) ++# define RADEON_DRIVER_BRIGHTNESS_EN (1 << 26) ++# define RADEON_DISPLAY_ROT_MASK (3 << 28) ++# define RADEON_DISPLAY_ROT_00 (0 << 28) ++# define RADEON_DISPLAY_ROT_90 (1 << 28) ++# define RADEON_DISPLAY_ROT_180 (2 << 28) ++# define RADEON_DISPLAY_ROT_270 (3 << 28) ++#define RADEON_BIOS_1_SCRATCH 0x0014 
++#define RADEON_BIOS_2_SCRATCH 0x0018 ++#define RADEON_BIOS_3_SCRATCH 0x001c ++#define RADEON_BIOS_4_SCRATCH 0x0020 ++# define RADEON_CRT1_ATTACHED_MASK (3 << 0) ++# define RADEON_CRT1_ATTACHED_MONO (1 << 0) ++# define RADEON_CRT1_ATTACHED_COLOR (2 << 0) ++# define RADEON_LCD1_ATTACHED (1 << 2) ++# define RADEON_DFP1_ATTACHED (1 << 3) ++# define RADEON_TV1_ATTACHED_MASK (3 << 4) ++# define RADEON_TV1_ATTACHED_COMP (1 << 4) ++# define RADEON_TV1_ATTACHED_SVIDEO (2 << 4) ++# define RADEON_CRT2_ATTACHED_MASK (3 << 8) ++# define RADEON_CRT2_ATTACHED_MONO (1 << 8) ++# define RADEON_CRT2_ATTACHED_COLOR (2 << 8) ++# define RADEON_DFP2_ATTACHED (1 << 11) ++#define RADEON_BIOS_5_SCRATCH 0x0024 ++# define RADEON_LCD1_ON (1 << 0) ++# define RADEON_CRT1_ON (1 << 1) ++# define RADEON_TV1_ON (1 << 2) ++# define RADEON_DFP1_ON (1 << 3) ++# define RADEON_CRT2_ON (1 << 5) ++# define RADEON_CV1_ON (1 << 6) ++# define RADEON_DFP2_ON (1 << 7) ++# define RADEON_LCD1_CRTC_MASK (1 << 8) ++# define RADEON_LCD1_CRTC_SHIFT 8 ++# define RADEON_CRT1_CRTC_MASK (1 << 9) ++# define RADEON_CRT1_CRTC_SHIFT 9 ++# define RADEON_TV1_CRTC_MASK (1 << 10) ++# define RADEON_TV1_CRTC_SHIFT 10 ++# define RADEON_DFP1_CRTC_MASK (1 << 11) ++# define RADEON_DFP1_CRTC_SHIFT 11 ++# define RADEON_CRT2_CRTC_MASK (1 << 12) ++# define RADEON_CRT2_CRTC_SHIFT 12 ++# define RADEON_CV1_CRTC_MASK (1 << 13) ++# define RADEON_CV1_CRTC_SHIFT 13 ++# define RADEON_DFP2_CRTC_MASK (1 << 14) ++# define RADEON_DFP2_CRTC_SHIFT 14 ++#define RADEON_BIOS_6_SCRATCH 0x0028 ++# define RADEON_ACC_MODE_CHANGE (1 << 2) ++# define RADEON_EXT_DESKTOP_MODE (1 << 3) ++# define RADEON_LCD_DPMS_ON (1 << 20) ++# define RADEON_CRT_DPMS_ON (1 << 21) ++# define RADEON_TV_DPMS_ON (1 << 22) ++# define RADEON_DFP_DPMS_ON (1 << 23) ++# define RADEON_DPMS_MASK (3 << 24) ++# define RADEON_DPMS_ON (0 << 24) ++# define RADEON_DPMS_STANDBY (1 << 24) ++# define RADEON_DPMS_SUSPEND (2 << 24) ++# define RADEON_DPMS_OFF (3 << 24) ++# define 
RADEON_SCREEN_BLANKING (1 << 26) ++# define RADEON_DRIVER_CRITICAL (1 << 27) ++# define RADEON_DISPLAY_SWITCHING_DIS (1 << 30) ++#define RADEON_BIOS_7_SCRATCH 0x002c ++# define RADEON_SYS_HOTKEY (1 << 10) ++# define RADEON_DRV_LOADED (1 << 12) ++#define RADEON_BIOS_ROM 0x0f30 /* PCI */ ++#define RADEON_BIST 0x0f0f /* PCI */ ++#define RADEON_BRUSH_DATA0 0x1480 ++#define RADEON_BRUSH_DATA1 0x1484 ++#define RADEON_BRUSH_DATA10 0x14a8 ++#define RADEON_BRUSH_DATA11 0x14ac ++#define RADEON_BRUSH_DATA12 0x14b0 ++#define RADEON_BRUSH_DATA13 0x14b4 ++#define RADEON_BRUSH_DATA14 0x14b8 ++#define RADEON_BRUSH_DATA15 0x14bc ++#define RADEON_BRUSH_DATA16 0x14c0 ++#define RADEON_BRUSH_DATA17 0x14c4 ++#define RADEON_BRUSH_DATA18 0x14c8 ++#define RADEON_BRUSH_DATA19 0x14cc ++#define RADEON_BRUSH_DATA2 0x1488 ++#define RADEON_BRUSH_DATA20 0x14d0 ++#define RADEON_BRUSH_DATA21 0x14d4 ++#define RADEON_BRUSH_DATA22 0x14d8 ++#define RADEON_BRUSH_DATA23 0x14dc ++#define RADEON_BRUSH_DATA24 0x14e0 ++#define RADEON_BRUSH_DATA25 0x14e4 ++#define RADEON_BRUSH_DATA26 0x14e8 ++#define RADEON_BRUSH_DATA27 0x14ec ++#define RADEON_BRUSH_DATA28 0x14f0 ++#define RADEON_BRUSH_DATA29 0x14f4 ++#define RADEON_BRUSH_DATA3 0x148c ++#define RADEON_BRUSH_DATA30 0x14f8 ++#define RADEON_BRUSH_DATA31 0x14fc ++#define RADEON_BRUSH_DATA32 0x1500 ++#define RADEON_BRUSH_DATA33 0x1504 ++#define RADEON_BRUSH_DATA34 0x1508 ++#define RADEON_BRUSH_DATA35 0x150c ++#define RADEON_BRUSH_DATA36 0x1510 ++#define RADEON_BRUSH_DATA37 0x1514 ++#define RADEON_BRUSH_DATA38 0x1518 ++#define RADEON_BRUSH_DATA39 0x151c ++#define RADEON_BRUSH_DATA4 0x1490 ++#define RADEON_BRUSH_DATA40 0x1520 ++#define RADEON_BRUSH_DATA41 0x1524 ++#define RADEON_BRUSH_DATA42 0x1528 ++#define RADEON_BRUSH_DATA43 0x152c ++#define RADEON_BRUSH_DATA44 0x1530 ++#define RADEON_BRUSH_DATA45 0x1534 ++#define RADEON_BRUSH_DATA46 0x1538 ++#define RADEON_BRUSH_DATA47 0x153c ++#define RADEON_BRUSH_DATA48 0x1540 ++#define RADEON_BRUSH_DATA49 0x1544 ++#define 
RADEON_BRUSH_DATA5 0x1494 ++#define RADEON_BRUSH_DATA50 0x1548 ++#define RADEON_BRUSH_DATA51 0x154c ++#define RADEON_BRUSH_DATA52 0x1550 ++#define RADEON_BRUSH_DATA53 0x1554 ++#define RADEON_BRUSH_DATA54 0x1558 ++#define RADEON_BRUSH_DATA55 0x155c ++#define RADEON_BRUSH_DATA56 0x1560 ++#define RADEON_BRUSH_DATA57 0x1564 ++#define RADEON_BRUSH_DATA58 0x1568 ++#define RADEON_BRUSH_DATA59 0x156c ++#define RADEON_BRUSH_DATA6 0x1498 ++#define RADEON_BRUSH_DATA60 0x1570 ++#define RADEON_BRUSH_DATA61 0x1574 ++#define RADEON_BRUSH_DATA62 0x1578 ++#define RADEON_BRUSH_DATA63 0x157c ++#define RADEON_BRUSH_DATA7 0x149c ++#define RADEON_BRUSH_DATA8 0x14a0 ++#define RADEON_BRUSH_DATA9 0x14a4 ++#define RADEON_BRUSH_SCALE 0x1470 ++#define RADEON_BRUSH_Y_X 0x1474 ++#define RADEON_BUS_CNTL 0x0030 ++# define RADEON_BUS_MASTER_DIS (1 << 6) ++# define RADEON_BUS_BIOS_DIS_ROM (1 << 12) ++# define RADEON_BUS_RD_DISCARD_EN (1 << 24) ++# define RADEON_BUS_RD_ABORT_EN (1 << 25) ++# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28) ++# define RADEON_BUS_WRT_BURST (1 << 29) ++# define RADEON_BUS_READ_BURST (1 << 30) ++#define RADEON_BUS_CNTL1 0x0034 ++# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4) ++ ++//#define RADEON_PCIE_INDEX 0x0030 ++//#define RADEON_PCIE_DATA 0x0034 ++#define RADEON_PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE */ ++# define RADEON_PCIE_LC_LINK_WIDTH_SHIFT 0 ++# define RADEON_PCIE_LC_LINK_WIDTH_MASK 0x7 ++# define RADEON_PCIE_LC_LINK_WIDTH_X0 0 ++# define RADEON_PCIE_LC_LINK_WIDTH_X1 1 ++# define RADEON_PCIE_LC_LINK_WIDTH_X2 2 ++# define RADEON_PCIE_LC_LINK_WIDTH_X4 3 ++# define RADEON_PCIE_LC_LINK_WIDTH_X8 4 ++# define RADEON_PCIE_LC_LINK_WIDTH_X12 5 ++# define RADEON_PCIE_LC_LINK_WIDTH_X16 6 ++# define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT 4 ++# define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK 0x70 ++# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8) ++# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9) ++# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10) ++ ++#define RADEON_CACHE_CNTL 0x1724 
++#define RADEON_CACHE_LINE 0x0f0c /* PCI */ ++#define RADEON_CAPABILITIES_ID 0x0f50 /* PCI */ ++#define RADEON_CAPABILITIES_PTR 0x0f34 /* PCI */ ++#define RADEON_CLK_PIN_CNTL 0x0001 /* PLL */ ++# define RADEON_SCLK_DYN_START_CNTL (1 << 15) ++#define RADEON_CLOCK_CNTL_DATA 0x000c ++#define RADEON_CLOCK_CNTL_INDEX 0x0008 ++# define RADEON_PLL_WR_EN (1 << 7) ++# define RADEON_PLL_DIV_SEL (3 << 8) ++# define RADEON_PLL2_DIV_SEL_MASK ~(3 << 8) ++#define RADEON_CLK_PWRMGT_CNTL 0x0014 ++# define RADEON_ENGIN_DYNCLK_MODE (1 << 12) ++# define RADEON_ACTIVE_HILO_LAT_MASK (3 << 13) ++# define RADEON_ACTIVE_HILO_LAT_SHIFT 13 ++# define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12) ++# define RADEON_MC_BUSY (1 << 16) ++# define RADEON_DLL_READY (1 << 19) ++# define RADEON_CG_NO1_DEBUG_0 (1 << 24) ++# define RADEON_CG_NO1_DEBUG_MASK (0x1f << 24) ++# define RADEON_DYN_STOP_MODE_MASK (7 << 21) ++# define RADEON_TVPLL_PWRMGT_OFF (1 << 30) ++# define RADEON_TVCLK_TURNOFF (1 << 31) ++#define RADEON_PLL_PWRMGT_CNTL 0x0015 ++# define RADEON_TCL_BYPASS_DISABLE (1 << 20) ++#define RADEON_CLR_CMP_CLR_3D 0x1a24 ++#define RADEON_CLR_CMP_CLR_DST 0x15c8 ++#define RADEON_CLR_CMP_CLR_SRC 0x15c4 ++#define RADEON_CLR_CMP_CNTL 0x15c0 ++# define RADEON_SRC_CMP_EQ_COLOR (4 << 0) ++# define RADEON_SRC_CMP_NEQ_COLOR (5 << 0) ++# define RADEON_CLR_CMP_SRC_SOURCE (1 << 24) ++#define RADEON_CLR_CMP_MASK 0x15cc ++# define RADEON_CLR_CMP_MSK 0xffffffff ++#define RADEON_CLR_CMP_MASK_3D 0x1A28 ++#define RADEON_COMMAND 0x0f04 /* PCI */ ++#define RADEON_COMPOSITE_SHADOW_ID 0x1a0c ++#define RADEON_CONFIG_APER_0_BASE 0x0100 ++#define RADEON_CONFIG_APER_1_BASE 0x0104 ++#define RADEON_CONFIG_APER_SIZE 0x0108 ++#define RADEON_CONFIG_BONDS 0x00e8 ++#define RADEON_CONFIG_CNTL 0x00e0 ++# define RADEON_CFG_ATI_REV_A11 (0 << 16) ++# define RADEON_CFG_ATI_REV_A12 (1 << 16) ++# define RADEON_CFG_ATI_REV_A13 (2 << 16) ++# define RADEON_CFG_ATI_REV_ID_MASK (0xf << 16) ++#define RADEON_CONFIG_MEMSIZE 0x00f8 ++#define 
RADEON_CONFIG_MEMSIZE_EMBEDDED 0x0114 ++#define RADEON_CONFIG_REG_1_BASE 0x010c ++#define RADEON_CONFIG_REG_APER_SIZE 0x0110 ++#define RADEON_CONFIG_XSTRAP 0x00e4 ++#define RADEON_CONSTANT_COLOR_C 0x1d34 ++# define RADEON_CONSTANT_COLOR_MASK 0x00ffffff ++# define RADEON_CONSTANT_COLOR_ONE 0x00ffffff ++# define RADEON_CONSTANT_COLOR_ZERO 0x00000000 ++#define RADEON_CRC_CMDFIFO_ADDR 0x0740 ++#define RADEON_CRC_CMDFIFO_DOUT 0x0744 ++#define RADEON_GRPH_BUFFER_CNTL 0x02f0 ++# define RADEON_GRPH_START_REQ_MASK (0x7f) ++# define RADEON_GRPH_START_REQ_SHIFT 0 ++# define RADEON_GRPH_STOP_REQ_MASK (0x7f<<8) ++# define RADEON_GRPH_STOP_REQ_SHIFT 8 ++# define RADEON_GRPH_CRITICAL_POINT_MASK (0x7f<<16) ++# define RADEON_GRPH_CRITICAL_POINT_SHIFT 16 ++# define RADEON_GRPH_CRITICAL_CNTL (1<<28) ++# define RADEON_GRPH_BUFFER_SIZE (1<<29) ++# define RADEON_GRPH_CRITICAL_AT_SOF (1<<30) ++# define RADEON_GRPH_STOP_CNTL (1<<31) ++#define RADEON_GRPH2_BUFFER_CNTL 0x03f0 ++# define RADEON_GRPH2_START_REQ_MASK (0x7f) ++# define RADEON_GRPH2_START_REQ_SHIFT 0 ++# define RADEON_GRPH2_STOP_REQ_MASK (0x7f<<8) ++# define RADEON_GRPH2_STOP_REQ_SHIFT 8 ++# define RADEON_GRPH2_CRITICAL_POINT_MASK (0x7f<<16) ++# define RADEON_GRPH2_CRITICAL_POINT_SHIFT 16 ++# define RADEON_GRPH2_CRITICAL_CNTL (1<<28) ++# define RADEON_GRPH2_BUFFER_SIZE (1<<29) ++# define RADEON_GRPH2_CRITICAL_AT_SOF (1<<30) ++# define RADEON_GRPH2_STOP_CNTL (1<<31) ++#define RADEON_CRTC_CRNT_FRAME 0x0214 ++#define RADEON_CRTC_EXT_CNTL 0x0054 ++# define RADEON_CRTC_VGA_XOVERSCAN (1 << 0) ++# define RADEON_VGA_ATI_LINEAR (1 << 3) ++# define RADEON_XCRT_CNT_EN (1 << 6) ++# define RADEON_CRTC_HSYNC_DIS (1 << 8) ++# define RADEON_CRTC_VSYNC_DIS (1 << 9) ++# define RADEON_CRTC_DISPLAY_DIS (1 << 10) ++# define RADEON_CRTC_SYNC_TRISTAT (1 << 11) ++# define RADEON_CRTC_CRT_ON (1 << 15) ++#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE 0x0055 ++# define RADEON_CRTC_HSYNC_DIS_BYTE (1 << 0) ++# define RADEON_CRTC_VSYNC_DIS_BYTE (1 << 1) ++# define 
RADEON_CRTC_DISPLAY_DIS_BYTE (1 << 2) ++#define RADEON_CRTC_GEN_CNTL 0x0050 ++# define RADEON_CRTC_DBL_SCAN_EN (1 << 0) ++# define RADEON_CRTC_INTERLACE_EN (1 << 1) ++# define RADEON_CRTC_CSYNC_EN (1 << 4) ++# define RADEON_CRTC_ICON_EN (1 << 15) ++# define RADEON_CRTC_CUR_EN (1 << 16) ++# define RADEON_CRTC_CUR_MODE_MASK (7 << 20) ++# define RADEON_CRTC_CUR_MODE_SHIFT 20 ++# define RADEON_CRTC_CUR_MODE_MONO 0 ++# define RADEON_CRTC_CUR_MODE_24BPP 2 ++# define RADEON_CRTC_EXT_DISP_EN (1 << 24) ++# define RADEON_CRTC_EN (1 << 25) ++# define RADEON_CRTC_DISP_REQ_EN_B (1 << 26) ++#define RADEON_CRTC2_GEN_CNTL 0x03f8 ++# define RADEON_CRTC2_DBL_SCAN_EN (1 << 0) ++# define RADEON_CRTC2_INTERLACE_EN (1 << 1) ++# define RADEON_CRTC2_SYNC_TRISTAT (1 << 4) ++# define RADEON_CRTC2_HSYNC_TRISTAT (1 << 5) ++# define RADEON_CRTC2_VSYNC_TRISTAT (1 << 6) ++# define RADEON_CRTC2_CRT2_ON (1 << 7) ++# define RADEON_CRTC2_PIX_WIDTH_SHIFT 8 ++# define RADEON_CRTC2_PIX_WIDTH_MASK (0xf << 8) ++# define RADEON_CRTC2_ICON_EN (1 << 15) ++# define RADEON_CRTC2_CUR_EN (1 << 16) ++# define RADEON_CRTC2_CUR_MODE_MASK (7 << 20) ++# define RADEON_CRTC2_DISP_DIS (1 << 23) ++# define RADEON_CRTC2_EN (1 << 25) ++# define RADEON_CRTC2_DISP_REQ_EN_B (1 << 26) ++# define RADEON_CRTC2_CSYNC_EN (1 << 27) ++# define RADEON_CRTC2_HSYNC_DIS (1 << 28) ++# define RADEON_CRTC2_VSYNC_DIS (1 << 29) ++#define RADEON_CRTC_MORE_CNTL 0x27c ++# define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2) ++# define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3) ++# define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4) ++# define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5) ++#define RADEON_CRTC_GUI_TRIG_VLINE 0x0218 ++#define RADEON_CRTC_H_SYNC_STRT_WID 0x0204 ++# define RADEON_CRTC_H_SYNC_STRT_PIX (0x07 << 0) ++# define RADEON_CRTC_H_SYNC_STRT_CHAR (0x3ff << 3) ++# define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3 ++# define RADEON_CRTC_H_SYNC_WID (0x3f << 16) ++# define RADEON_CRTC_H_SYNC_WID_SHIFT 16 ++# define RADEON_CRTC_H_SYNC_POL (1 << 23) ++#define 
RADEON_CRTC2_H_SYNC_STRT_WID 0x0304 ++# define RADEON_CRTC2_H_SYNC_STRT_PIX (0x07 << 0) ++# define RADEON_CRTC2_H_SYNC_STRT_CHAR (0x3ff << 3) ++# define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3 ++# define RADEON_CRTC2_H_SYNC_WID (0x3f << 16) ++# define RADEON_CRTC2_H_SYNC_WID_SHIFT 16 ++# define RADEON_CRTC2_H_SYNC_POL (1 << 23) ++#define RADEON_CRTC_H_TOTAL_DISP 0x0200 ++# define RADEON_CRTC_H_TOTAL (0x03ff << 0) ++# define RADEON_CRTC_H_TOTAL_SHIFT 0 ++# define RADEON_CRTC_H_DISP (0x01ff << 16) ++# define RADEON_CRTC_H_DISP_SHIFT 16 ++#define RADEON_CRTC2_H_TOTAL_DISP 0x0300 ++# define RADEON_CRTC2_H_TOTAL (0x03ff << 0) ++# define RADEON_CRTC2_H_TOTAL_SHIFT 0 ++# define RADEON_CRTC2_H_DISP (0x01ff << 16) ++# define RADEON_CRTC2_H_DISP_SHIFT 16 ++ ++#define RADEON_CRTC_OFFSET_RIGHT 0x0220 ++#define RADEON_CRTC_OFFSET 0x0224 ++# define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30) ++# define RADEON_CRTC_OFFSET__OFFSET_LOCK (1<<31) ++ ++#define RADEON_CRTC2_OFFSET 0x0324 ++# define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30) ++# define RADEON_CRTC2_OFFSET__OFFSET_LOCK (1<<31) ++#define RADEON_CRTC_OFFSET_CNTL 0x0228 ++# define RADEON_CRTC_TILE_LINE_SHIFT 0 ++# define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT 4 ++# define R300_CRTC_X_Y_MODE_EN_RIGHT (1 << 6) ++# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK (3 << 7) ++# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO (0 << 7) ++# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7) ++# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7) ++# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS (3 << 7) ++# define R300_CRTC_X_Y_MODE_EN (1 << 9) ++# define R300_CRTC_MICRO_TILE_BUFFER_MASK (3 << 10) ++# define R300_CRTC_MICRO_TILE_BUFFER_AUTO (0 << 10) ++# define R300_CRTC_MICRO_TILE_BUFFER_SINGLE (1 << 10) ++# define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE (2 << 10) ++# define R300_CRTC_MICRO_TILE_BUFFER_DIS (3 << 10) ++# define R300_CRTC_MICRO_TILE_EN_RIGHT (1 << 12) ++# define R300_CRTC_MICRO_TILE_EN (1 << 13) ++# define 
R300_CRTC_MACRO_TILE_EN_RIGHT (1 << 14) ++# define R300_CRTC_MACRO_TILE_EN (1 << 15) ++# define RADEON_CRTC_TILE_EN_RIGHT (1 << 14) ++# define RADEON_CRTC_TILE_EN (1 << 15) ++# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) ++# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) ++ ++#define R300_CRTC_TILE_X0_Y0 0x0350 ++#define R300_CRTC2_TILE_X0_Y0 0x0358 ++ ++#define RADEON_CRTC2_OFFSET_CNTL 0x0328 ++# define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16) ++# define RADEON_CRTC2_TILE_EN (1 << 15) ++#define RADEON_CRTC_PITCH 0x022c ++# define RADEON_CRTC_PITCH__SHIFT 0 ++# define RADEON_CRTC_PITCH__RIGHT_SHIFT 16 ++ ++#define RADEON_CRTC2_PITCH 0x032c ++#define RADEON_CRTC_STATUS 0x005c ++# define RADEON_CRTC_VBLANK_SAVE (1 << 1) ++# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1) ++#define RADEON_CRTC2_STATUS 0x03fc ++# define RADEON_CRTC2_VBLANK_SAVE (1 << 1) ++# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1) ++#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c ++# define RADEON_CRTC_V_SYNC_STRT (0x7ff << 0) ++# define RADEON_CRTC_V_SYNC_STRT_SHIFT 0 ++# define RADEON_CRTC_V_SYNC_WID (0x1f << 16) ++# define RADEON_CRTC_V_SYNC_WID_SHIFT 16 ++# define RADEON_CRTC_V_SYNC_POL (1 << 23) ++#define RADEON_CRTC2_V_SYNC_STRT_WID 0x030c ++# define RADEON_CRTC2_V_SYNC_STRT (0x7ff << 0) ++# define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0 ++# define RADEON_CRTC2_V_SYNC_WID (0x1f << 16) ++# define RADEON_CRTC2_V_SYNC_WID_SHIFT 16 ++# define RADEON_CRTC2_V_SYNC_POL (1 << 23) ++#define RADEON_CRTC_V_TOTAL_DISP 0x0208 ++# define RADEON_CRTC_V_TOTAL (0x07ff << 0) ++# define RADEON_CRTC_V_TOTAL_SHIFT 0 ++# define RADEON_CRTC_V_DISP (0x07ff << 16) ++# define RADEON_CRTC_V_DISP_SHIFT 16 ++#define RADEON_CRTC2_V_TOTAL_DISP 0x0308 ++# define RADEON_CRTC2_V_TOTAL (0x07ff << 0) ++# define RADEON_CRTC2_V_TOTAL_SHIFT 0 ++# define RADEON_CRTC2_V_DISP (0x07ff << 16) ++# define RADEON_CRTC2_V_DISP_SHIFT 16 ++#define RADEON_CRTC_VLINE_CRNT_VLINE 0x0210 ++# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16) ++#define 
RADEON_CRTC2_CRNT_FRAME 0x0314 ++#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318 ++#define RADEON_CRTC2_STATUS 0x03fc ++#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310 ++#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */ ++#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */ ++#define RADEON_CUR_CLR0 0x026c ++#define RADEON_CUR_CLR1 0x0270 ++#define RADEON_CUR_HORZ_VERT_OFF 0x0268 ++#define RADEON_CUR_HORZ_VERT_POSN 0x0264 ++#define RADEON_CUR_OFFSET 0x0260 ++# define RADEON_CUR_LOCK (1 << 31) ++#define RADEON_CUR2_CLR0 0x036c ++#define RADEON_CUR2_CLR1 0x0370 ++#define RADEON_CUR2_HORZ_VERT_OFF 0x0368 ++#define RADEON_CUR2_HORZ_VERT_POSN 0x0364 ++#define RADEON_CUR2_OFFSET 0x0360 ++# define RADEON_CUR2_LOCK (1 << 31) ++ ++#define RADEON_DAC_CNTL 0x0058 ++# define RADEON_DAC_RANGE_CNTL (3 << 0) ++# define RADEON_DAC_RANGE_CNTL_PS2 (2 << 0) ++# define RADEON_DAC_RANGE_CNTL_MASK 0x03 ++# define RADEON_DAC_BLANKING (1 << 2) ++# define RADEON_DAC_CMP_EN (1 << 3) ++# define RADEON_DAC_CMP_OUTPUT (1 << 7) ++# define RADEON_DAC_8BIT_EN (1 << 8) ++# define RADEON_DAC_TVO_EN (1 << 10) ++# define RADEON_DAC_VGA_ADR_EN (1 << 13) ++# define RADEON_DAC_PDWN (1 << 15) ++# define RADEON_DAC_MASK_ALL (0xff << 24) ++#define RADEON_DAC_CNTL2 0x007c ++# define RADEON_DAC2_TV_CLK_SEL (0 << 1) ++# define RADEON_DAC2_DAC_CLK_SEL (1 << 0) ++# define RADEON_DAC2_DAC2_CLK_SEL (1 << 1) ++# define RADEON_DAC2_PALETTE_ACC_CTL (1 << 5) ++# define RADEON_DAC2_CMP_EN (1 << 7) ++# define RADEON_DAC2_CMP_OUT_R (1 << 8) ++# define RADEON_DAC2_CMP_OUT_G (1 << 9) ++# define RADEON_DAC2_CMP_OUT_B (1 << 10) ++# define RADEON_DAC2_CMP_OUTPUT (1 << 11) ++#define RADEON_DAC_EXT_CNTL 0x0280 ++# define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0) ++# define RADEON_DAC2_FORCE_DATA_EN (1 << 1) ++# define RADEON_DAC_FORCE_BLANK_OFF_EN (1 << 4) ++# define RADEON_DAC_FORCE_DATA_EN (1 << 5) ++# define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6) ++# define RADEON_DAC_FORCE_DATA_SEL_R (0 << 6) ++# define RADEON_DAC_FORCE_DATA_SEL_G (1 
<< 6) ++# define RADEON_DAC_FORCE_DATA_SEL_B (2 << 6) ++# define RADEON_DAC_FORCE_DATA_SEL_RGB (3 << 6) ++# define RADEON_DAC_FORCE_DATA_MASK 0x0003ff00 ++# define RADEON_DAC_FORCE_DATA_SHIFT 8 ++#define RADEON_DAC_MACRO_CNTL 0x0d04 ++# define RADEON_DAC_PDWN_R (1 << 16) ++# define RADEON_DAC_PDWN_G (1 << 17) ++# define RADEON_DAC_PDWN_B (1 << 18) ++#define RADEON_DISP_PWR_MAN 0x0d08 ++# define RADEON_DISP_PWR_MAN_D3_CRTC_EN (1 << 0) ++# define RADEON_DISP_PWR_MAN_D3_CRTC2_EN (1 << 4) ++# define RADEON_DISP_PWR_MAN_DPMS_ON (0 << 8) ++# define RADEON_DISP_PWR_MAN_DPMS_STANDBY (1 << 8) ++# define RADEON_DISP_PWR_MAN_DPMS_SUSPEND (2 << 8) ++# define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8) ++# define RADEON_DISP_D3_RST (1 << 16) ++# define RADEON_DISP_D3_REG_RST (1 << 17) ++# define RADEON_DISP_D3_GRPH_RST (1 << 18) ++# define RADEON_DISP_D3_SUBPIC_RST (1 << 19) ++# define RADEON_DISP_D3_OV0_RST (1 << 20) ++# define RADEON_DISP_D1D2_GRPH_RST (1 << 21) ++# define RADEON_DISP_D1D2_SUBPIC_RST (1 << 22) ++# define RADEON_DISP_D1D2_OV0_RST (1 << 23) ++# define RADEON_DIG_TMDS_ENABLE_RST (1 << 24) ++# define RADEON_TV_ENABLE_RST (1 << 25) ++# define RADEON_AUTO_PWRUP_EN (1 << 26) ++#define RADEON_TV_DAC_CNTL 0x088c ++# define RADEON_TV_DAC_NBLANK (1 << 0) ++# define RADEON_TV_DAC_NHOLD (1 << 1) ++# define RADEON_TV_DAC_PEDESTAL (1 << 2) ++# define RADEON_TV_MONITOR_DETECT_EN (1 << 4) ++# define RADEON_TV_DAC_CMPOUT (1 << 5) ++# define RADEON_TV_DAC_STD_MASK (3 << 8) ++# define RADEON_TV_DAC_STD_PAL (0 << 8) ++# define RADEON_TV_DAC_STD_NTSC (1 << 8) ++# define RADEON_TV_DAC_STD_PS2 (2 << 8) ++# define RADEON_TV_DAC_STD_RS343 (3 << 8) ++# define RADEON_TV_DAC_BGSLEEP (1 << 6) ++# define RADEON_TV_DAC_BGADJ_MASK (0xf << 16) ++# define RADEON_TV_DAC_BGADJ_SHIFT 16 ++# define RADEON_TV_DAC_DACADJ_MASK (0xf << 20) ++# define RADEON_TV_DAC_DACADJ_SHIFT 20 ++# define RADEON_TV_DAC_RDACPD (1 << 24) ++# define RADEON_TV_DAC_GDACPD (1 << 25) ++# define RADEON_TV_DAC_BDACPD (1 << 26) ++# 
define RADEON_TV_DAC_RDACDET (1 << 29) ++# define RADEON_TV_DAC_GDACDET (1 << 30) ++# define RADEON_TV_DAC_BDACDET (1 << 31) ++# define R420_TV_DAC_DACADJ_MASK (0x1f << 20) ++# define R420_TV_DAC_RDACPD (1 << 25) ++# define R420_TV_DAC_GDACPD (1 << 26) ++# define R420_TV_DAC_BDACPD (1 << 27) ++# define R420_TV_DAC_TVENABLE (1 << 28) ++#define RADEON_DISP_HW_DEBUG 0x0d14 ++# define RADEON_CRT2_DISP1_SEL (1 << 5) ++#define RADEON_DISP_OUTPUT_CNTL 0x0d64 ++# define RADEON_DISP_DAC_SOURCE_MASK 0x03 ++# define RADEON_DISP_DAC2_SOURCE_MASK 0x0c ++# define RADEON_DISP_DAC_SOURCE_CRTC2 0x01 ++# define RADEON_DISP_DAC_SOURCE_RMX 0x02 ++# define RADEON_DISP_DAC_SOURCE_LTU 0x03 ++# define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04 ++# define RADEON_DISP_TVDAC_SOURCE_MASK (0x03 << 2) ++# define RADEON_DISP_TVDAC_SOURCE_CRTC 0x0 ++# define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2) ++# define RADEON_DISP_TVDAC_SOURCE_RMX (0x02 << 2) ++# define RADEON_DISP_TVDAC_SOURCE_LTU (0x03 << 2) ++# define RADEON_DISP_TRANS_MATRIX_MASK (0x03 << 4) ++# define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4) ++# define RADEON_DISP_TRANS_MATRIX_GRAPHICS (0x01 << 4) ++# define RADEON_DISP_TRANS_MATRIX_VIDEO (0x02 << 4) ++# define RADEON_DISP_TV_SOURCE_CRTC (1 << 16) /* crtc1 or crtc2 */ ++# define RADEON_DISP_TV_SOURCE_LTU (0 << 16) /* linear transform unit */ ++#define RADEON_DISP_TV_OUT_CNTL 0x0d6c ++# define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16) ++# define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16) ++#define RADEON_DAC_CRC_SIG 0x02cc ++#define RADEON_DAC_DATA 0x03c9 /* VGA */ ++#define RADEON_DAC_MASK 0x03c6 /* VGA */ ++#define RADEON_DAC_R_INDEX 0x03c7 /* VGA */ ++#define RADEON_DAC_W_INDEX 0x03c8 /* VGA */ ++#define RADEON_DDA_CONFIG 0x02e0 ++#define RADEON_DDA_ON_OFF 0x02e4 ++#define RADEON_DEFAULT_OFFSET 0x16e0 ++#define RADEON_DEFAULT_PITCH 0x16e4 ++#define RADEON_DEFAULT_SC_BOTTOM_RIGHT 0x16e8 ++# define RADEON_DEFAULT_SC_RIGHT_MAX (0x1fff << 0) ++# define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 
16) ++#define RADEON_DESTINATION_3D_CLR_CMP_VAL 0x1820 ++#define RADEON_DESTINATION_3D_CLR_CMP_MSK 0x1824 ++#define RADEON_DEVICE_ID 0x0f02 /* PCI */ ++#define RADEON_DISP_MISC_CNTL 0x0d00 ++# define RADEON_SOFT_RESET_GRPH_PP (1 << 0) ++#define RADEON_DISP_MERGE_CNTL 0x0d60 ++# define RADEON_DISP_ALPHA_MODE_MASK 0x03 ++# define RADEON_DISP_ALPHA_MODE_KEY 0 ++# define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1 ++# define RADEON_DISP_ALPHA_MODE_GLOBAL 2 ++# define RADEON_DISP_RGB_OFFSET_EN (1 << 8) ++# define RADEON_DISP_GRPH_ALPHA_MASK (0xff << 16) ++# define RADEON_DISP_OV0_ALPHA_MASK (0xff << 24) ++# define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9) ++#define RADEON_DISP2_MERGE_CNTL 0x0d68 ++# define RADEON_DISP2_RGB_OFFSET_EN (1 << 8) ++#define RADEON_DISP_LIN_TRANS_GRPH_A 0x0d80 ++#define RADEON_DISP_LIN_TRANS_GRPH_B 0x0d84 ++#define RADEON_DISP_LIN_TRANS_GRPH_C 0x0d88 ++#define RADEON_DISP_LIN_TRANS_GRPH_D 0x0d8c ++#define RADEON_DISP_LIN_TRANS_GRPH_E 0x0d90 ++#define RADEON_DISP_LIN_TRANS_GRPH_F 0x0d98 ++#define RADEON_DP_BRUSH_BKGD_CLR 0x1478 ++#define RADEON_DP_BRUSH_FRGD_CLR 0x147c ++#define RADEON_DP_CNTL 0x16c0 ++# define RADEON_DST_X_LEFT_TO_RIGHT (1 << 0) ++# define RADEON_DST_Y_TOP_TO_BOTTOM (1 << 1) ++# define RADEON_DP_DST_TILE_LINEAR (0 << 3) ++# define RADEON_DP_DST_TILE_MACRO (1 << 3) ++# define RADEON_DP_DST_TILE_MICRO (2 << 3) ++# define RADEON_DP_DST_TILE_BOTH (3 << 3) ++#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR 0x16d0 ++# define RADEON_DST_Y_MAJOR (1 << 2) ++# define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15) ++# define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31) ++#define RADEON_DP_DATATYPE 0x16c4 ++# define RADEON_HOST_BIG_ENDIAN_EN (1 << 29) ++#define RADEON_DP_GUI_MASTER_CNTL 0x146c ++# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) ++# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) ++# define RADEON_GMC_SRC_CLIPPING (1 << 2) ++# define RADEON_GMC_DST_CLIPPING (1 << 3) ++# define RADEON_GMC_BRUSH_DATATYPE_MASK (0x0f << 4) ++# define 
RADEON_GMC_BRUSH_8X8_MONO_FG_BG (0 << 4) ++# define RADEON_GMC_BRUSH_8X8_MONO_FG_LA (1 << 4) ++# define RADEON_GMC_BRUSH_1X8_MONO_FG_BG (4 << 4) ++# define RADEON_GMC_BRUSH_1X8_MONO_FG_LA (5 << 4) ++# define RADEON_GMC_BRUSH_32x1_MONO_FG_BG (6 << 4) ++# define RADEON_GMC_BRUSH_32x1_MONO_FG_LA (7 << 4) ++# define RADEON_GMC_BRUSH_32x32_MONO_FG_BG (8 << 4) ++# define RADEON_GMC_BRUSH_32x32_MONO_FG_LA (9 << 4) ++# define RADEON_GMC_BRUSH_8x8_COLOR (10 << 4) ++# define RADEON_GMC_BRUSH_1X8_COLOR (12 << 4) ++# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4) ++# define RADEON_GMC_BRUSH_NONE (15 << 4) ++# define RADEON_GMC_DST_8BPP_CI (2 << 8) ++# define RADEON_GMC_DST_15BPP (3 << 8) ++# define RADEON_GMC_DST_16BPP (4 << 8) ++# define RADEON_GMC_DST_24BPP (5 << 8) ++# define RADEON_GMC_DST_32BPP (6 << 8) ++# define RADEON_GMC_DST_8BPP_RGB (7 << 8) ++# define RADEON_GMC_DST_Y8 (8 << 8) ++# define RADEON_GMC_DST_RGB8 (9 << 8) ++# define RADEON_GMC_DST_VYUY (11 << 8) ++# define RADEON_GMC_DST_YVYU (12 << 8) ++# define RADEON_GMC_DST_AYUV444 (14 << 8) ++# define RADEON_GMC_DST_ARGB4444 (15 << 8) ++# define RADEON_GMC_DST_DATATYPE_MASK (0x0f << 8) ++# define RADEON_GMC_DST_DATATYPE_SHIFT 8 ++# define RADEON_GMC_SRC_DATATYPE_MASK (3 << 12) ++# define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0 << 12) ++# define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1 << 12) ++# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12) ++# define RADEON_GMC_BYTE_PIX_ORDER (1 << 14) ++# define RADEON_GMC_BYTE_MSB_TO_LSB (0 << 14) ++# define RADEON_GMC_BYTE_LSB_TO_MSB (1 << 14) ++# define RADEON_GMC_CONVERSION_TEMP (1 << 15) ++# define RADEON_GMC_CONVERSION_TEMP_6500 (0 << 15) ++# define RADEON_GMC_CONVERSION_TEMP_9300 (1 << 15) ++# define RADEON_GMC_ROP3_MASK (0xff << 16) ++# define RADEON_DP_SRC_SOURCE_MASK (7 << 24) ++# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24) ++# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24) ++# define RADEON_GMC_3D_FCN_EN (1 << 27) ++# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28) ++# define 
RADEON_GMC_AUX_CLIP_DIS (1 << 29) ++# define RADEON_GMC_WR_MSK_DIS (1 << 30) ++# define RADEON_GMC_LD_BRUSH_Y_X (1 << 31) ++# define RADEON_ROP3_ZERO 0x00000000 ++# define RADEON_ROP3_DSa 0x00880000 ++# define RADEON_ROP3_SDna 0x00440000 ++# define RADEON_ROP3_S 0x00cc0000 ++# define RADEON_ROP3_DSna 0x00220000 ++# define RADEON_ROP3_D 0x00aa0000 ++# define RADEON_ROP3_DSx 0x00660000 ++# define RADEON_ROP3_DSo 0x00ee0000 ++# define RADEON_ROP3_DSon 0x00110000 ++# define RADEON_ROP3_DSxn 0x00990000 ++# define RADEON_ROP3_Dn 0x00550000 ++# define RADEON_ROP3_SDno 0x00dd0000 ++# define RADEON_ROP3_Sn 0x00330000 ++# define RADEON_ROP3_DSno 0x00bb0000 ++# define RADEON_ROP3_DSan 0x00770000 ++# define RADEON_ROP3_ONE 0x00ff0000 ++# define RADEON_ROP3_DPa 0x00a00000 ++# define RADEON_ROP3_PDna 0x00500000 ++# define RADEON_ROP3_P 0x00f00000 ++# define RADEON_ROP3_DPna 0x000a0000 ++# define RADEON_ROP3_D 0x00aa0000 ++# define RADEON_ROP3_DPx 0x005a0000 ++# define RADEON_ROP3_DPo 0x00fa0000 ++# define RADEON_ROP3_DPon 0x00050000 ++# define RADEON_ROP3_PDxn 0x00a50000 ++# define RADEON_ROP3_PDno 0x00f50000 ++# define RADEON_ROP3_Pn 0x000f0000 ++# define RADEON_ROP3_DPno 0x00af0000 ++# define RADEON_ROP3_DPan 0x005f0000 ++#define RADEON_DP_GUI_MASTER_CNTL_C 0x1c84 ++#define RADEON_DP_MIX 0x16c8 ++#define RADEON_DP_SRC_BKGD_CLR 0x15dc ++#define RADEON_DP_SRC_FRGD_CLR 0x15d8 ++#define RADEON_DP_WRITE_MASK 0x16cc ++#define RADEON_DST_BRES_DEC 0x1630 ++#define RADEON_DST_BRES_ERR 0x1628 ++#define RADEON_DST_BRES_INC 0x162c ++#define RADEON_DST_BRES_LNTH 0x1634 ++#define RADEON_DST_BRES_LNTH_SUB 0x1638 ++#define RADEON_DST_HEIGHT 0x1410 ++#define RADEON_DST_HEIGHT_WIDTH 0x143c ++#define RADEON_DST_HEIGHT_WIDTH_8 0x158c ++#define RADEON_DST_HEIGHT_WIDTH_BW 0x15b4 ++#define RADEON_DST_HEIGHT_Y 0x15a0 ++#define RADEON_DST_LINE_START 0x1600 ++#define RADEON_DST_LINE_END 0x1604 ++#define RADEON_DST_LINE_PATCOUNT 0x1608 ++# define RADEON_BRES_CNTL_SHIFT 8 ++#define RADEON_DST_OFFSET 
0x1404 ++#define RADEON_DST_PITCH 0x1408 ++#define RADEON_DST_PITCH_OFFSET 0x142c ++#define RADEON_DST_PITCH_OFFSET_C 0x1c80 ++# define RADEON_PITCH_SHIFT 21 ++# define RADEON_DST_TILE_LINEAR (0 << 30) ++# define RADEON_DST_TILE_MACRO (1 << 30) ++# define RADEON_DST_TILE_MICRO (2 << 30) ++# define RADEON_DST_TILE_BOTH (3 << 30) ++#define RADEON_DST_WIDTH 0x140c ++#define RADEON_DST_WIDTH_HEIGHT 0x1598 ++#define RADEON_DST_WIDTH_X 0x1588 ++#define RADEON_DST_WIDTH_X_INCY 0x159c ++#define RADEON_DST_X 0x141c ++#define RADEON_DST_X_SUB 0x15a4 ++#define RADEON_DST_X_Y 0x1594 ++#define RADEON_DST_Y 0x1420 ++#define RADEON_DST_Y_SUB 0x15a8 ++#define RADEON_DST_Y_X 0x1438 ++ ++#define RADEON_FCP_CNTL 0x0910 ++# define RADEON_FCP0_SRC_PCICLK 0 ++# define RADEON_FCP0_SRC_PCLK 1 ++# define RADEON_FCP0_SRC_PCLKb 2 ++# define RADEON_FCP0_SRC_HREF 3 ++# define RADEON_FCP0_SRC_GND 4 ++# define RADEON_FCP0_SRC_HREFb 5 ++#define RADEON_FLUSH_1 0x1704 ++#define RADEON_FLUSH_2 0x1708 ++#define RADEON_FLUSH_3 0x170c ++#define RADEON_FLUSH_4 0x1710 ++#define RADEON_FLUSH_5 0x1714 ++#define RADEON_FLUSH_6 0x1718 ++#define RADEON_FLUSH_7 0x171c ++#define RADEON_FOG_3D_TABLE_START 0x1810 ++#define RADEON_FOG_3D_TABLE_END 0x1814 ++#define RADEON_FOG_3D_TABLE_DENSITY 0x181c ++#define RADEON_FOG_TABLE_INDEX 0x1a14 ++#define RADEON_FOG_TABLE_DATA 0x1a18 ++#define RADEON_FP_CRTC_H_TOTAL_DISP 0x0250 ++#define RADEON_FP_CRTC_V_TOTAL_DISP 0x0254 ++# define RADEON_FP_CRTC_H_TOTAL_MASK 0x000003ff ++# define RADEON_FP_CRTC_H_DISP_MASK 0x01ff0000 ++# define RADEON_FP_CRTC_V_TOTAL_MASK 0x00000fff ++# define RADEON_FP_CRTC_V_DISP_MASK 0x0fff0000 ++# define RADEON_FP_H_SYNC_STRT_CHAR_MASK 0x00001ff8 ++# define RADEON_FP_H_SYNC_WID_MASK 0x003f0000 ++# define RADEON_FP_V_SYNC_STRT_MASK 0x00000fff ++# define RADEON_FP_V_SYNC_WID_MASK 0x001f0000 ++# define RADEON_FP_CRTC_H_TOTAL_SHIFT 0x00000000 ++# define RADEON_FP_CRTC_H_DISP_SHIFT 0x00000010 ++# define RADEON_FP_CRTC_V_TOTAL_SHIFT 0x00000000 ++# define 
RADEON_FP_CRTC_V_DISP_SHIFT 0x00000010 ++# define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003 ++# define RADEON_FP_H_SYNC_WID_SHIFT 0x00000010 ++# define RADEON_FP_V_SYNC_STRT_SHIFT 0x00000000 ++# define RADEON_FP_V_SYNC_WID_SHIFT 0x00000010 ++#define RADEON_FP_GEN_CNTL 0x0284 ++# define RADEON_FP_FPON (1 << 0) ++# define RADEON_FP_BLANK_EN (1 << 1) ++# define RADEON_FP_TMDS_EN (1 << 2) ++# define RADEON_FP_PANEL_FORMAT (1 << 3) ++# define RADEON_FP_EN_TMDS (1 << 7) ++# define RADEON_FP_DETECT_SENSE (1 << 8) ++# define R200_FP_SOURCE_SEL_MASK (3 << 10) ++# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10) ++# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10) ++# define R200_FP_SOURCE_SEL_RMX (2 << 10) ++# define R200_FP_SOURCE_SEL_TRANS (3 << 10) ++# define RADEON_FP_SEL_CRTC1 (0 << 13) ++# define RADEON_FP_SEL_CRTC2 (1 << 13) ++# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15) ++# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16) ++# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17) ++# define RADEON_FP_CRTC_USE_SHADOW_VEND (1 << 18) ++# define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20) ++# define RADEON_FP_DFP_SYNC_SEL (1 << 21) ++# define RADEON_FP_CRTC_LOCK_8DOT (1 << 22) ++# define RADEON_FP_CRT_SYNC_SEL (1 << 23) ++# define RADEON_FP_USE_SHADOW_EN (1 << 24) ++# define RADEON_FP_CRT_SYNC_ALT (1 << 26) ++#define RADEON_FP2_GEN_CNTL 0x0288 ++# define RADEON_FP2_BLANK_EN (1 << 1) ++# define RADEON_FP2_ON (1 << 2) ++# define RADEON_FP2_PANEL_FORMAT (1 << 3) ++# define RADEON_FP2_DETECT_SENSE (1 << 8) ++# define R200_FP2_SOURCE_SEL_MASK (3 << 10) ++# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10) ++# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10) ++# define R200_FP2_SOURCE_SEL_RMX (2 << 10) ++# define R200_FP2_SOURCE_SEL_TRANS_UNIT (3 << 10) ++# define RADEON_FP2_SRC_SEL_MASK (3 << 13) ++# define RADEON_FP2_SRC_SEL_CRTC2 (1 << 13) ++# define RADEON_FP2_FP_POL (1 << 16) ++# define RADEON_FP2_LP_POL (1 << 17) ++# define RADEON_FP2_SCK_POL (1 << 18) ++# define RADEON_FP2_LCD_CNTL_MASK (7 << 
19) ++# define RADEON_FP2_PAD_FLOP_EN (1 << 22) ++# define RADEON_FP2_CRC_EN (1 << 23) ++# define RADEON_FP2_CRC_READ_EN (1 << 24) ++# define RADEON_FP2_DVO_EN (1 << 25) ++# define RADEON_FP2_DVO_RATE_SEL_SDR (1 << 26) ++# define R200_FP2_DVO_RATE_SEL_SDR (1 << 27) ++# define R300_FP2_DVO_CLOCK_MODE_SINGLE (1 << 28) ++# define R300_FP2_DVO_DUAL_CHANNEL_EN (1 << 29) ++#define RADEON_FP_H_SYNC_STRT_WID 0x02c4 ++#define RADEON_FP_H2_SYNC_STRT_WID 0x03c4 ++#define RADEON_FP_HORZ_STRETCH 0x028c ++#define RADEON_FP_HORZ2_STRETCH 0x038c ++# define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff ++# define RADEON_HORZ_STRETCH_RATIO_MAX 4096 ++# define RADEON_HORZ_PANEL_SIZE (0x1ff << 16) ++# define RADEON_HORZ_PANEL_SHIFT 16 ++# define RADEON_HORZ_STRETCH_PIXREP (0 << 25) ++# define RADEON_HORZ_STRETCH_BLEND (1 << 26) ++# define RADEON_HORZ_STRETCH_ENABLE (1 << 25) ++# define RADEON_HORZ_AUTO_RATIO (1 << 27) ++# define RADEON_HORZ_FP_LOOP_STRETCH (0x7 << 28) ++# define RADEON_HORZ_AUTO_RATIO_INC (1 << 31) ++#define RADEON_FP_HORZ_VERT_ACTIVE 0x0278 ++#define RADEON_FP_V_SYNC_STRT_WID 0x02c8 ++#define RADEON_FP_VERT_STRETCH 0x0290 ++#define RADEON_FP_V2_SYNC_STRT_WID 0x03c8 ++#define RADEON_FP_VERT2_STRETCH 0x0390 ++# define RADEON_VERT_PANEL_SIZE (0xfff << 12) ++# define RADEON_VERT_PANEL_SHIFT 12 ++# define RADEON_VERT_STRETCH_RATIO_MASK 0xfff ++# define RADEON_VERT_STRETCH_RATIO_SHIFT 0 ++# define RADEON_VERT_STRETCH_RATIO_MAX 4096 ++# define RADEON_VERT_STRETCH_ENABLE (1 << 25) ++# define RADEON_VERT_STRETCH_LINEREP (0 << 26) ++# define RADEON_VERT_STRETCH_BLEND (1 << 26) ++# define RADEON_VERT_AUTO_RATIO_EN (1 << 27) ++# define RADEON_VERT_AUTO_RATIO_INC (1 << 31) ++# define RADEON_VERT_STRETCH_RESERVED 0x71000000 ++#define RS400_FP_2ND_GEN_CNTL 0x0384 ++# define RS400_FP_2ND_ON (1 << 0) ++# define RS400_FP_2ND_BLANK_EN (1 << 1) ++# define RS400_TMDS_2ND_EN (1 << 2) ++# define RS400_PANEL_FORMAT_2ND (1 << 3) ++# define RS400_FP_2ND_EN_TMDS (1 << 7) ++# define 
RS400_FP_2ND_DETECT_SENSE (1 << 8) ++# define RS400_FP_2ND_SOURCE_SEL_MASK (3 << 10) ++# define RS400_FP_2ND_SOURCE_SEL_CRTC1 (0 << 10) ++# define RS400_FP_2ND_SOURCE_SEL_CRTC2 (1 << 10) ++# define RS400_FP_2ND_SOURCE_SEL_RMX (2 << 10) ++# define RS400_FP_2ND_DETECT_EN (1 << 12) ++# define RS400_HPD_2ND_SEL (1 << 13) ++#define RS400_FP2_2_GEN_CNTL 0x0388 ++# define RS400_FP2_2_BLANK_EN (1 << 1) ++# define RS400_FP2_2_ON (1 << 2) ++# define RS400_FP2_2_PANEL_FORMAT (1 << 3) ++# define RS400_FP2_2_DETECT_SENSE (1 << 8) ++# define RS400_FP2_2_SOURCE_SEL_MASK (3 << 10) ++# define RS400_FP2_2_SOURCE_SEL_CRTC1 (0 << 10) ++# define RS400_FP2_2_SOURCE_SEL_CRTC2 (1 << 10) ++# define RS400_FP2_2_SOURCE_SEL_RMX (2 << 10) ++# define RS400_FP2_2_DVO2_EN (1 << 25) ++#define RS400_TMDS2_CNTL 0x0394 ++#define RS400_TMDS2_TRANSMITTER_CNTL 0x03a4 ++# define RS400_TMDS2_PLLEN (1 << 0) ++# define RS400_TMDS2_PLLRST (1 << 1) ++ ++#define RADEON_GEN_INT_CNTL 0x0040 ++#define RADEON_GEN_INT_STATUS 0x0044 ++# define RADEON_VSYNC_INT_AK (1 << 2) ++# define RADEON_VSYNC_INT (1 << 2) ++# define RADEON_VSYNC2_INT_AK (1 << 6) ++# define RADEON_VSYNC2_INT (1 << 6) ++#define RADEON_GENENB 0x03c3 /* VGA */ ++#define RADEON_GENFC_RD 0x03ca /* VGA */ ++#define RADEON_GENFC_WT 0x03da /* VGA, 0x03ba */ ++#define RADEON_GENMO_RD 0x03cc /* VGA */ ++#define RADEON_GENMO_WT 0x03c2 /* VGA */ ++#define RADEON_GENS0 0x03c2 /* VGA */ ++#define RADEON_GENS1 0x03da /* VGA, 0x03ba */ ++#define RADEON_GPIO_MONID 0x0068 /* DDC interface via I2C */ ++#define RADEON_GPIO_MONIDB 0x006c ++#define RADEON_GPIO_CRT2_DDC 0x006c ++#define RADEON_GPIO_DVI_DDC 0x0064 ++#define RADEON_GPIO_VGA_DDC 0x0060 ++# define RADEON_GPIO_A_0 (1 << 0) ++# define RADEON_GPIO_A_1 (1 << 1) ++# define RADEON_GPIO_Y_0 (1 << 8) ++# define RADEON_GPIO_Y_1 (1 << 9) ++# define RADEON_GPIO_Y_SHIFT_0 8 ++# define RADEON_GPIO_Y_SHIFT_1 9 ++# define RADEON_GPIO_EN_0 (1 << 16) ++# define RADEON_GPIO_EN_1 (1 << 17) ++# define RADEON_GPIO_MASK_0 (1 << 
24) /*??*/ ++# define RADEON_GPIO_MASK_1 (1 << 25) /*??*/ ++#define RADEON_GRPH8_DATA 0x03cf /* VGA */ ++#define RADEON_GRPH8_IDX 0x03ce /* VGA */ ++#define RADEON_GUI_SCRATCH_REG0 0x15e0 ++#define RADEON_GUI_SCRATCH_REG1 0x15e4 ++#define RADEON_GUI_SCRATCH_REG2 0x15e8 ++#define RADEON_GUI_SCRATCH_REG3 0x15ec ++#define RADEON_GUI_SCRATCH_REG4 0x15f0 ++#define RADEON_GUI_SCRATCH_REG5 0x15f4 ++ ++#define RADEON_HEADER 0x0f0e /* PCI */ ++#define RADEON_HOST_DATA0 0x17c0 ++#define RADEON_HOST_DATA1 0x17c4 ++#define RADEON_HOST_DATA2 0x17c8 ++#define RADEON_HOST_DATA3 0x17cc ++#define RADEON_HOST_DATA4 0x17d0 ++#define RADEON_HOST_DATA5 0x17d4 ++#define RADEON_HOST_DATA6 0x17d8 ++#define RADEON_HOST_DATA7 0x17dc ++#define RADEON_HOST_DATA_LAST 0x17e0 ++#define RADEON_HOST_PATH_CNTL 0x0130 ++# define RADEON_HDP_SOFT_RESET (1 << 26) ++# define RADEON_HDP_APER_CNTL (1 << 23) ++#define RADEON_HTOTAL_CNTL 0x0009 /* PLL */ ++# define RADEON_HTOT_CNTL_VGA_EN (1 << 28) ++#define RADEON_HTOTAL2_CNTL 0x002e /* PLL */ ++ ++ /* Multimedia I2C bus */ ++#define RADEON_I2C_CNTL_0 0x0090 ++#define RADEON_I2C_DONE (1<<0) ++#define RADEON_I2C_NACK (1<<1) ++#define RADEON_I2C_HALT (1<<2) ++#define RADEON_I2C_SOFT_RST (1<<5) ++#define RADEON_I2C_DRIVE_EN (1<<6) ++#define RADEON_I2C_DRIVE_SEL (1<<7) ++#define RADEON_I2C_START (1<<8) ++#define RADEON_I2C_STOP (1<<9) ++#define RADEON_I2C_RECEIVE (1<<10) ++#define RADEON_I2C_ABORT (1<<11) ++#define RADEON_I2C_GO (1<<12) ++#define RADEON_I2C_CNTL_1 0x0094 ++#define RADEON_I2C_SEL (1<<16) ++#define RADEON_I2C_EN (1<<17) ++#define RADEON_I2C_DATA 0x0098 ++ ++#define RADEON_DVI_I2C_CNTL_0 0x02e0 ++#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? 
*/ ++#define RADEON_DVI_I2C_DATA 0x02e8 ++ ++#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ ++#define RADEON_INTERRUPT_PIN 0x0f3d /* PCI */ ++#define RADEON_IO_BASE 0x0f14 /* PCI */ ++ ++#define RADEON_LATENCY 0x0f0d /* PCI */ ++#define RADEON_LEAD_BRES_DEC 0x1608 ++#define RADEON_LEAD_BRES_LNTH 0x161c ++#define RADEON_LEAD_BRES_LNTH_SUB 0x1624 ++#define RADEON_LVDS_GEN_CNTL 0x02d0 ++# define RADEON_LVDS_ON (1 << 0) ++# define RADEON_LVDS_DISPLAY_DIS (1 << 1) ++# define RADEON_LVDS_PANEL_TYPE (1 << 2) ++# define RADEON_LVDS_PANEL_FORMAT (1 << 3) ++# define RADEON_LVDS_NO_FM (0 << 4) ++# define RADEON_LVDS_2_GREY (1 << 4) ++# define RADEON_LVDS_4_GREY (2 << 4) ++# define RADEON_LVDS_RST_FM (1 << 6) ++# define RADEON_LVDS_EN (1 << 7) ++# define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8 ++# define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8) ++# define RADEON_LVDS_BL_MOD_EN (1 << 16) ++# define RADEON_LVDS_BL_CLK_SEL (1 << 17) ++# define RADEON_LVDS_DIGON (1 << 18) ++# define RADEON_LVDS_BLON (1 << 19) ++# define RADEON_LVDS_FP_POL_LOW (1 << 20) ++# define RADEON_LVDS_LP_POL_LOW (1 << 21) ++# define RADEON_LVDS_DTM_POL_LOW (1 << 22) ++# define RADEON_LVDS_SEL_CRTC2 (1 << 23) ++# define RADEON_LVDS_FPDI_EN (1 << 27) ++# define RADEON_LVDS_HSYNC_DELAY_SHIFT 28 ++#define RADEON_LVDS_PLL_CNTL 0x02d4 ++# define RADEON_HSYNC_DELAY_SHIFT 28 ++# define RADEON_HSYNC_DELAY_MASK (0xf << 28) ++# define RADEON_LVDS_PLL_EN (1 << 16) ++# define RADEON_LVDS_PLL_RESET (1 << 17) ++# define R300_LVDS_SRC_SEL_MASK (3 << 18) ++# define R300_LVDS_SRC_SEL_CRTC1 (0 << 18) ++# define R300_LVDS_SRC_SEL_CRTC2 (1 << 18) ++# define R300_LVDS_SRC_SEL_RMX (2 << 18) ++#define RADEON_LVDS_SS_GEN_CNTL 0x02ec ++# define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT 16 ++# define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT 20 ++ ++#define RADEON_MAX_LATENCY 0x0f3f /* PCI */ ++#define RADEON_MC_AGP_LOCATION 0x014c ++#define RADEON_MC_FB_LOCATION 0x0148 ++#define RADEON_DISPLAY_BASE_ADDR 0x23c ++#define RADEON_DISPLAY2_BASE_ADDR 0x33c 
++#define RADEON_OV0_BASE_ADDR 0x43c ++#define RADEON_NB_TOM 0x15c ++#define R300_MC_INIT_MISC_LAT_TIMER 0x180 ++#define RADEON_MCLK_CNTL 0x0012 /* PLL */ ++# define RADEON_FORCEON_MCLKA (1 << 16) ++# define RADEON_FORCEON_MCLKB (1 << 17) ++# define RADEON_FORCEON_YCLKA (1 << 18) ++# define RADEON_FORCEON_YCLKB (1 << 19) ++# define RADEON_FORCEON_MC (1 << 20) ++# define RADEON_FORCEON_AIC (1 << 21) ++# define R300_DISABLE_MC_MCLKA (1 << 21) ++# define R300_DISABLE_MC_MCLKB (1 << 21) ++#define RADEON_MCLK_MISC 0x001f /* PLL */ ++# define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12) ++# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) ++# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) ++# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) ++#define RADEON_LCD_GPIO_MASK 0x01a0 ++#define RADEON_GPIOPAD_EN 0x01a0 ++#define RADEON_LCD_GPIO_Y_REG 0x01a4 ++#define RADEON_MDGPIO_A_REG 0x01ac ++#define RADEON_MDGPIO_EN_REG 0x01b0 ++#define RADEON_MDGPIO_MASK 0x0198 ++#define RADEON_GPIOPAD_MASK 0x0198 ++#define RADEON_GPIOPAD_A 0x019c ++#define RADEON_MDGPIO_Y_REG 0x01b4 ++#define RADEON_MEM_ADDR_CONFIG 0x0148 ++#define RADEON_MEM_BASE 0x0f10 /* PCI */ ++#define RADEON_MEM_CNTL 0x0140 ++# define RADEON_MEM_NUM_CHANNELS_MASK 0x01 ++# define RADEON_MEM_USE_B_CH_ONLY (1 << 1) ++# define RV100_HALF_MODE (1 << 3) ++# define R300_MEM_NUM_CHANNELS_MASK 0x03 ++# define R300_MEM_USE_CD_CH_ONLY (1 << 2) ++#define RADEON_MEM_TIMING_CNTL 0x0144 /* EXT_MEM_CNTL */ ++#define RADEON_MEM_INIT_LAT_TIMER 0x0154 ++#define RADEON_MEM_INTF_CNTL 0x014c ++#define RADEON_MEM_SDRAM_MODE_REG 0x0158 ++# define RADEON_SDRAM_MODE_MASK 0xffff0000 ++# define RADEON_B3MEM_RESET_MASK 0x6fffffff ++# define RADEON_MEM_CFG_TYPE_DDR (1 << 30) ++#define RADEON_MEM_STR_CNTL 0x0150 ++# define RADEON_MEM_PWRUP_COMPL_A (1 << 0) ++# define RADEON_MEM_PWRUP_COMPL_B (1 << 1) ++# define R300_MEM_PWRUP_COMPL_C (1 << 2) ++# define R300_MEM_PWRUP_COMPL_D (1 << 3) ++# define RADEON_MEM_PWRUP_COMPLETE 0x03 ++# define 
R300_MEM_PWRUP_COMPLETE 0x0f ++#define RADEON_MC_STATUS 0x0150 ++# define RADEON_MC_IDLE (1 << 2) ++# define R300_MC_IDLE (1 << 4) ++#define RADEON_MEM_VGA_RP_SEL 0x003c ++#define RADEON_MEM_VGA_WP_SEL 0x0038 ++#define RADEON_MIN_GRANT 0x0f3e /* PCI */ ++#define RADEON_MM_DATA 0x0004 ++#define RADEON_MM_INDEX 0x0000 ++#define RADEON_MPLL_CNTL 0x000e /* PLL */ ++#define RADEON_MPP_TB_CONFIG 0x01c0 /* ? */ ++#define RADEON_MPP_GP_CONFIG 0x01c8 /* ? */ ++#define RADEON_SEPROM_CNTL1 0x01c0 ++# define RADEON_SCK_PRESCALE_SHIFT 24 ++# define RADEON_SCK_PRESCALE_MASK (0xff << 24) ++#define R300_MC_IND_INDEX 0x01f8 ++# define R300_MC_IND_ADDR_MASK 0x3f ++# define R300_MC_IND_WR_EN (1 << 8) ++#define R300_MC_IND_DATA 0x01fc ++#define R300_MC_READ_CNTL_AB 0x017c ++# define R300_MEM_RBS_POSITION_A_MASK 0x03 ++#define R300_MC_READ_CNTL_CD_mcind 0x24 ++# define R300_MEM_RBS_POSITION_C_MASK 0x03 ++ ++#define RADEON_N_VIF_COUNT 0x0248 ++ ++#define RADEON_OV0_AUTO_FLIP_CNTL 0x0470 ++# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM 0x00000007 ++# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD 0x00000008 ++# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD 0x00000010 ++# define RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020 ++# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE 0x00000040 ++# define RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT 0x00000300 ++# define RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN 0x00010000 ++# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN 0x00040000 ++# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN 0x00080000 ++# define RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE 0x00800000 ++ ++#define RADEON_OV0_COLOUR_CNTL 0x04E0 ++#define RADEON_OV0_DEINTERLACE_PATTERN 0x0474 ++#define RADEON_OV0_EXCLUSIVE_HORZ 0x0408 ++# define RADEON_EXCL_HORZ_START_MASK 0x000000ff ++# define RADEON_EXCL_HORZ_END_MASK 0x0000ff00 ++# define RADEON_EXCL_HORZ_BACK_PORCH_MASK 0x00ff0000 ++# define RADEON_EXCL_HORZ_EXCLUSIVE_EN 0x80000000 ++#define RADEON_OV0_EXCLUSIVE_VERT 
0x040C ++# define RADEON_EXCL_VERT_START_MASK 0x000003ff ++# define RADEON_EXCL_VERT_END_MASK 0x03ff0000 ++#define RADEON_OV0_FILTER_CNTL 0x04A0 ++# define RADEON_FILTER_PROGRAMMABLE_COEF 0x0 ++# define RADEON_FILTER_HC_COEF_HORZ_Y 0x1 ++# define RADEON_FILTER_HC_COEF_HORZ_UV 0x2 ++# define RADEON_FILTER_HC_COEF_VERT_Y 0x4 ++# define RADEON_FILTER_HC_COEF_VERT_UV 0x8 ++# define RADEON_FILTER_HARDCODED_COEF 0xf ++# define RADEON_FILTER_COEF_MASK 0xf ++ ++#define RADEON_OV0_FOUR_TAP_COEF_0 0x04B0 ++#define RADEON_OV0_FOUR_TAP_COEF_1 0x04B4 ++#define RADEON_OV0_FOUR_TAP_COEF_2 0x04B8 ++#define RADEON_OV0_FOUR_TAP_COEF_3 0x04BC ++#define RADEON_OV0_FOUR_TAP_COEF_4 0x04C0 ++#define RADEON_OV0_FLAG_CNTL 0x04DC ++#define RADEON_OV0_GAMMA_000_00F 0x0d40 ++#define RADEON_OV0_GAMMA_010_01F 0x0d44 ++#define RADEON_OV0_GAMMA_020_03F 0x0d48 ++#define RADEON_OV0_GAMMA_040_07F 0x0d4c ++#define RADEON_OV0_GAMMA_080_0BF 0x0e00 ++#define RADEON_OV0_GAMMA_0C0_0FF 0x0e04 ++#define RADEON_OV0_GAMMA_100_13F 0x0e08 ++#define RADEON_OV0_GAMMA_140_17F 0x0e0c ++#define RADEON_OV0_GAMMA_180_1BF 0x0e10 ++#define RADEON_OV0_GAMMA_1C0_1FF 0x0e14 ++#define RADEON_OV0_GAMMA_200_23F 0x0e18 ++#define RADEON_OV0_GAMMA_240_27F 0x0e1c ++#define RADEON_OV0_GAMMA_280_2BF 0x0e20 ++#define RADEON_OV0_GAMMA_2C0_2FF 0x0e24 ++#define RADEON_OV0_GAMMA_300_33F 0x0e28 ++#define RADEON_OV0_GAMMA_340_37F 0x0e2c ++#define RADEON_OV0_GAMMA_380_3BF 0x0d50 ++#define RADEON_OV0_GAMMA_3C0_3FF 0x0d54 ++#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW 0x04EC ++#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH 0x04F0 ++#define RADEON_OV0_H_INC 0x0480 ++#define RADEON_OV0_KEY_CNTL 0x04F4 ++# define RADEON_VIDEO_KEY_FN_MASK 0x00000003L ++# define RADEON_VIDEO_KEY_FN_FALSE 0x00000000L ++# define RADEON_VIDEO_KEY_FN_TRUE 0x00000001L ++# define RADEON_VIDEO_KEY_FN_EQ 0x00000002L ++# define RADEON_VIDEO_KEY_FN_NE 0x00000003L ++# define RADEON_GRAPHIC_KEY_FN_MASK 0x00000030L ++# define RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L ++# define 
RADEON_GRAPHIC_KEY_FN_TRUE 0x00000010L ++# define RADEON_GRAPHIC_KEY_FN_EQ 0x00000020L ++# define RADEON_GRAPHIC_KEY_FN_NE 0x00000030L ++# define RADEON_CMP_MIX_MASK 0x00000100L ++# define RADEON_CMP_MIX_OR 0x00000000L ++# define RADEON_CMP_MIX_AND 0x00000100L ++#define RADEON_OV0_LIN_TRANS_A 0x0d20 ++#define RADEON_OV0_LIN_TRANS_B 0x0d24 ++#define RADEON_OV0_LIN_TRANS_C 0x0d28 ++#define RADEON_OV0_LIN_TRANS_D 0x0d2c ++#define RADEON_OV0_LIN_TRANS_E 0x0d30 ++#define RADEON_OV0_LIN_TRANS_F 0x0d34 ++#define RADEON_OV0_P1_BLANK_LINES_AT_TOP 0x0430 ++# define RADEON_P1_BLNK_LN_AT_TOP_M1_MASK 0x00000fffL ++# define RADEON_P1_ACTIVE_LINES_M1 0x0fff0000L ++#define RADEON_OV0_P1_H_ACCUM_INIT 0x0488 ++#define RADEON_OV0_P1_V_ACCUM_INIT 0x0428 ++# define RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L ++# define RADEON_OV0_P1_V_ACCUM_INIT_MASK 0x01ff8000L ++#define RADEON_OV0_P1_X_START_END 0x0494 ++#define RADEON_OV0_P2_X_START_END 0x0498 ++#define RADEON_OV0_P23_BLANK_LINES_AT_TOP 0x0434 ++# define RADEON_P23_BLNK_LN_AT_TOP_M1_MASK 0x000007ffL ++# define RADEON_P23_ACTIVE_LINES_M1 0x07ff0000L ++#define RADEON_OV0_P23_H_ACCUM_INIT 0x048C ++#define RADEON_OV0_P23_V_ACCUM_INIT 0x042C ++#define RADEON_OV0_P3_X_START_END 0x049C ++#define RADEON_OV0_REG_LOAD_CNTL 0x0410 ++# define RADEON_REG_LD_CTL_LOCK 0x00000001L ++# define RADEON_REG_LD_CTL_VBLANK_DURING_LOCK 0x00000002L ++# define RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L ++# define RADEON_REG_LD_CTL_LOCK_READBACK 0x00000008L ++# define RADEON_REG_LD_CTL_FLIP_READBACK 0x00000010L ++#define RADEON_OV0_SCALE_CNTL 0x0420 ++# define RADEON_SCALER_HORZ_PICK_NEAREST 0x00000004L ++# define RADEON_SCALER_VERT_PICK_NEAREST 0x00000008L ++# define RADEON_SCALER_SIGNED_UV 0x00000010L ++# define RADEON_SCALER_GAMMA_SEL_MASK 0x00000060L ++# define RADEON_SCALER_GAMMA_SEL_BRIGHT 0x00000000L ++# define RADEON_SCALER_GAMMA_SEL_G22 0x00000020L ++# define RADEON_SCALER_GAMMA_SEL_G18 0x00000040L ++# define RADEON_SCALER_GAMMA_SEL_G14 
0x00000060L ++# define RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L ++# define RADEON_SCALER_SURFAC_FORMAT 0x00000f00L ++# define RADEON_SCALER_SOURCE_15BPP 0x00000300L ++# define RADEON_SCALER_SOURCE_16BPP 0x00000400L ++# define RADEON_SCALER_SOURCE_32BPP 0x00000600L ++# define RADEON_SCALER_SOURCE_YUV9 0x00000900L ++# define RADEON_SCALER_SOURCE_YUV12 0x00000A00L ++# define RADEON_SCALER_SOURCE_VYUY422 0x00000B00L ++# define RADEON_SCALER_SOURCE_YVYU422 0x00000C00L ++# define RADEON_SCALER_ADAPTIVE_DEINT 0x00001000L ++# define RADEON_SCALER_TEMPORAL_DEINT 0x00002000L ++# define RADEON_SCALER_CRTC_SEL 0x00004000L ++# define RADEON_SCALER_SMART_SWITCH 0x00008000L ++# define RADEON_SCALER_BURST_PER_PLANE 0x007F0000L ++# define RADEON_SCALER_DOUBLE_BUFFER 0x01000000L ++# define RADEON_SCALER_DIS_LIMIT 0x08000000L ++# define RADEON_SCALER_LIN_TRANS_BYPASS 0x10000000L ++# define RADEON_SCALER_INT_EMU 0x20000000L ++# define RADEON_SCALER_ENABLE 0x40000000L ++# define RADEON_SCALER_SOFT_RESET 0x80000000L ++#define RADEON_OV0_STEP_BY 0x0484 ++#define RADEON_OV0_TEST 0x04F8 ++#define RADEON_OV0_V_INC 0x0424 ++#define RADEON_OV0_VID_BUF_PITCH0_VALUE 0x0460 ++#define RADEON_OV0_VID_BUF_PITCH1_VALUE 0x0464 ++#define RADEON_OV0_VID_BUF0_BASE_ADRS 0x0440 ++# define RADEON_VIF_BUF0_PITCH_SEL 0x00000001L ++# define RADEON_VIF_BUF0_TILE_ADRS 0x00000002L ++# define RADEON_VIF_BUF0_BASE_ADRS_MASK 0x03fffff0L ++# define RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L ++#define RADEON_OV0_VID_BUF1_BASE_ADRS 0x0444 ++# define RADEON_VIF_BUF1_PITCH_SEL 0x00000001L ++# define RADEON_VIF_BUF1_TILE_ADRS 0x00000002L ++# define RADEON_VIF_BUF1_BASE_ADRS_MASK 0x03fffff0L ++# define RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L ++#define RADEON_OV0_VID_BUF2_BASE_ADRS 0x0448 ++# define RADEON_VIF_BUF2_PITCH_SEL 0x00000001L ++# define RADEON_VIF_BUF2_TILE_ADRS 0x00000002L ++# define RADEON_VIF_BUF2_BASE_ADRS_MASK 0x03fffff0L ++# define RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L ++#define 
RADEON_OV0_VID_BUF3_BASE_ADRS 0x044C ++#define RADEON_OV0_VID_BUF4_BASE_ADRS 0x0450 ++#define RADEON_OV0_VID_BUF5_BASE_ADRS 0x0454 ++#define RADEON_OV0_VIDEO_KEY_CLR_HIGH 0x04E8 ++#define RADEON_OV0_VIDEO_KEY_CLR_LOW 0x04E4 ++#define RADEON_OV0_Y_X_START 0x0400 ++#define RADEON_OV0_Y_X_END 0x0404 ++#define RADEON_OV1_Y_X_START 0x0600 ++#define RADEON_OV1_Y_X_END 0x0604 ++#define RADEON_OVR_CLR 0x0230 ++#define RADEON_OVR_WID_LEFT_RIGHT 0x0234 ++#define RADEON_OVR_WID_TOP_BOTTOM 0x0238 ++ ++/* first capture unit */ ++ ++#define RADEON_CAP0_BUF0_OFFSET 0x0920 ++#define RADEON_CAP0_BUF1_OFFSET 0x0924 ++#define RADEON_CAP0_BUF0_EVEN_OFFSET 0x0928 ++#define RADEON_CAP0_BUF1_EVEN_OFFSET 0x092C ++ ++#define RADEON_CAP0_BUF_PITCH 0x0930 ++#define RADEON_CAP0_V_WINDOW 0x0934 ++#define RADEON_CAP0_H_WINDOW 0x0938 ++#define RADEON_CAP0_VBI0_OFFSET 0x093C ++#define RADEON_CAP0_VBI1_OFFSET 0x0940 ++#define RADEON_CAP0_VBI_V_WINDOW 0x0944 ++#define RADEON_CAP0_VBI_H_WINDOW 0x0948 ++#define RADEON_CAP0_PORT_MODE_CNTL 0x094C ++#define RADEON_CAP0_TRIG_CNTL 0x0950 ++#define RADEON_CAP0_DEBUG 0x0954 ++#define RADEON_CAP0_CONFIG 0x0958 ++# define RADEON_CAP0_CONFIG_CONTINUOS 0x00000001 ++# define RADEON_CAP0_CONFIG_START_FIELD_EVEN 0x00000002 ++# define RADEON_CAP0_CONFIG_START_BUF_GET 0x00000004 ++# define RADEON_CAP0_CONFIG_START_BUF_SET 0x00000008 ++# define RADEON_CAP0_CONFIG_BUF_TYPE_ALT 0x00000010 ++# define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME 0x00000020 ++# define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040 ++# define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE 0x00000080 ++# define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE 0x00000100 ++# define RADEON_CAP0_CONFIG_MIRROR_EN 0x00000200 ++# define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN 0x00000400 ++# define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV 0x00000800 ++# define RADEON_CAP0_CONFIG_ANC_DECODE_EN 0x00001000 ++# define RADEON_CAP0_CONFIG_VBI_EN 0x00002000 ++# define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN 0x00004000 ++# define 
RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000 ++# define RADEON_CAP0_CONFIG_FAKE_FIELD_EN 0x00010000 ++# define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE 0x00020000 ++# define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000 ++# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2 0x00080000 ++# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4 0x00100000 ++# define RADEON_CAP0_CONFIG_VERT_DIVIDE_2 0x00200000 ++# define RADEON_CAP0_CONFIG_VERT_DIVIDE_4 0x00400000 ++# define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE 0x00000000 ++# define RADEON_CAP0_CONFIG_FORMAT_CCIR656 0x00800000 ++# define RADEON_CAP0_CONFIG_FORMAT_ZV 0x01000000 ++# define RADEON_CAP0_CONFIG_FORMAT_VIP 0x01800000 ++# define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT 0x02000000 ++# define RADEON_CAP0_CONFIG_HORZ_DECIMATOR 0x04000000 ++# define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422 0x00000000 ++# define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422 0x20000000 ++# define RADEON_CAP0_CONFIG_VBI_DIVIDE_2 0x40000000 ++# define RADEON_CAP0_CONFIG_VBI_DIVIDE_4 0x80000000 ++#define RADEON_CAP0_ANC_ODD_OFFSET 0x095C ++#define RADEON_CAP0_ANC_EVEN_OFFSET 0x0960 ++#define RADEON_CAP0_ANC_H_WINDOW 0x0964 ++#define RADEON_CAP0_VIDEO_SYNC_TEST 0x0968 ++#define RADEON_CAP0_ONESHOT_BUF_OFFSET 0x096C ++#define RADEON_CAP0_BUF_STATUS 0x0970 ++/* #define RADEON_CAP0_DWNSC_XRATIO 0x0978 */ ++/* #define RADEON_CAP0_XSHARPNESS 0x097C */ ++#define RADEON_CAP0_VBI2_OFFSET 0x0980 ++#define RADEON_CAP0_VBI3_OFFSET 0x0984 ++#define RADEON_CAP0_ANC2_OFFSET 0x0988 ++#define RADEON_CAP0_ANC3_OFFSET 0x098C ++#define RADEON_VID_BUFFER_CONTROL 0x0900 ++ ++/* second capture unit */ ++ ++#define RADEON_CAP1_BUF0_OFFSET 0x0990 ++#define RADEON_CAP1_BUF1_OFFSET 0x0994 ++#define RADEON_CAP1_BUF0_EVEN_OFFSET 0x0998 ++#define RADEON_CAP1_BUF1_EVEN_OFFSET 0x099C ++ ++#define RADEON_CAP1_BUF_PITCH 0x09A0 ++#define RADEON_CAP1_V_WINDOW 0x09A4 ++#define RADEON_CAP1_H_WINDOW 0x09A8 ++#define RADEON_CAP1_VBI_ODD_OFFSET 0x09AC ++#define RADEON_CAP1_VBI_EVEN_OFFSET 0x09B0 ++#define 
RADEON_CAP1_VBI_V_WINDOW 0x09B4 ++#define RADEON_CAP1_VBI_H_WINDOW 0x09B8 ++#define RADEON_CAP1_PORT_MODE_CNTL 0x09BC ++#define RADEON_CAP1_TRIG_CNTL 0x09C0 ++#define RADEON_CAP1_DEBUG 0x09C4 ++#define RADEON_CAP1_CONFIG 0x09C8 ++#define RADEON_CAP1_ANC_ODD_OFFSET 0x09CC ++#define RADEON_CAP1_ANC_EVEN_OFFSET 0x09D0 ++#define RADEON_CAP1_ANC_H_WINDOW 0x09D4 ++#define RADEON_CAP1_VIDEO_SYNC_TEST 0x09D8 ++#define RADEON_CAP1_ONESHOT_BUF_OFFSET 0x09DC ++#define RADEON_CAP1_BUF_STATUS 0x09E0 ++#define RADEON_CAP1_DWNSC_XRATIO 0x09E8 ++#define RADEON_CAP1_XSHARPNESS 0x09EC ++ ++/* misc multimedia registers */ ++ ++#define RADEON_IDCT_RUNS 0x1F80 ++#define RADEON_IDCT_LEVELS 0x1F84 ++#define RADEON_IDCT_CONTROL 0x1FBC ++#define RADEON_IDCT_AUTH_CONTROL 0x1F88 ++#define RADEON_IDCT_AUTH 0x1F8C ++ ++#define RADEON_P2PLL_CNTL 0x002a /* P2PLL */ ++# define RADEON_P2PLL_RESET (1 << 0) ++# define RADEON_P2PLL_SLEEP (1 << 1) ++# define RADEON_P2PLL_PVG_MASK (7 << 11) ++# define RADEON_P2PLL_PVG_SHIFT 11 ++# define RADEON_P2PLL_ATOMIC_UPDATE_EN (1 << 16) ++# define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17) ++# define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC (1 << 18) ++#define RADEON_P2PLL_DIV_0 0x002c ++# define RADEON_P2PLL_FB0_DIV_MASK 0x07ff ++# define RADEON_P2PLL_POST0_DIV_MASK 0x00070000 ++#define RADEON_P2PLL_REF_DIV 0x002B /* PLL */ ++# define RADEON_P2PLL_REF_DIV_MASK 0x03ff ++# define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */ ++# define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */ ++# define R300_PPLL_REF_DIV_ACC_MASK (0x3ff << 18) ++# define R300_PPLL_REF_DIV_ACC_SHIFT 18 ++#define RADEON_PALETTE_DATA 0x00b4 ++#define RADEON_PALETTE_30_DATA 0x00b8 ++#define RADEON_PALETTE_INDEX 0x00b0 ++#define RADEON_PCI_GART_PAGE 0x017c ++#define RADEON_PIXCLKS_CNTL 0x002d ++# define RADEON_PIX2CLK_SRC_SEL_MASK 0x03 ++# define RADEON_PIX2CLK_SRC_SEL_CPUCLK 0x00 ++# define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01 ++# define RADEON_PIX2CLK_SRC_SEL_BYTECLK 0x02 ++# 
define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03 ++# define RADEON_PIX2CLK_ALWAYS_ONb (1<<6) ++# define RADEON_PIX2CLK_DAC_ALWAYS_ONb (1<<7) ++# define RADEON_PIXCLK_TV_SRC_SEL (1 << 8) ++# define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9) ++# define R300_DVOCLK_ALWAYS_ONb (1 << 10) ++# define RADEON_PIXCLK_BLEND_ALWAYS_ONb (1 << 11) ++# define RADEON_PIXCLK_GV_ALWAYS_ONb (1 << 12) ++# define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13) ++# define R300_PIXCLK_DVO_ALWAYS_ONb (1 << 13) ++# define RADEON_PIXCLK_LVDS_ALWAYS_ONb (1 << 14) ++# define RADEON_PIXCLK_TMDS_ALWAYS_ONb (1 << 15) ++# define R300_PIXCLK_TRANS_ALWAYS_ONb (1 << 16) ++# define R300_PIXCLK_TVO_ALWAYS_ONb (1 << 17) ++# define R300_P2G2CLK_ALWAYS_ONb (1 << 18) ++# define R300_P2G2CLK_DAC_ALWAYS_ONb (1 << 19) ++# define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23) ++#define RADEON_PLANE_3D_MASK_C 0x1d44 ++#define RADEON_PLL_TEST_CNTL 0x0013 /* PLL */ ++# define RADEON_PLL_MASK_READ_B (1 << 9) ++#define RADEON_PMI_CAP_ID 0x0f5c /* PCI */ ++#define RADEON_PMI_DATA 0x0f63 /* PCI */ ++#define RADEON_PMI_NXT_CAP_PTR 0x0f5d /* PCI */ ++#define RADEON_PMI_PMC_REG 0x0f5e /* PCI */ ++#define RADEON_PMI_PMCSR_REG 0x0f60 /* PCI */ ++#define RADEON_PMI_REGISTER 0x0f5c /* PCI */ ++#define RADEON_PPLL_CNTL 0x0002 /* PLL */ ++# define RADEON_PPLL_RESET (1 << 0) ++# define RADEON_PPLL_SLEEP (1 << 1) ++# define RADEON_PPLL_PVG_MASK (7 << 11) ++# define RADEON_PPLL_PVG_SHIFT 11 ++# define RADEON_PPLL_ATOMIC_UPDATE_EN (1 << 16) ++# define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17) ++# define RADEON_PPLL_ATOMIC_UPDATE_VSYNC (1 << 18) ++#define RADEON_PPLL_DIV_0 0x0004 /* PLL */ ++#define RADEON_PPLL_DIV_1 0x0005 /* PLL */ ++#define RADEON_PPLL_DIV_2 0x0006 /* PLL */ ++#define RADEON_PPLL_DIV_3 0x0007 /* PLL */ ++# define RADEON_PPLL_FB3_DIV_MASK 0x07ff ++# define RADEON_PPLL_POST3_DIV_MASK 0x00070000 ++#define RADEON_PPLL_REF_DIV 0x0003 /* PLL */ ++# define RADEON_PPLL_REF_DIV_MASK 0x03ff ++# define 
RADEON_PPLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */ ++# define RADEON_PPLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */ ++#define RADEON_PWR_MNGMT_CNTL_STATUS 0x0f60 /* PCI */ ++ ++#define RADEON_RBBM_GUICNTL 0x172c ++# define RADEON_HOST_DATA_SWAP_NONE (0 << 0) ++# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0) ++# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0) ++# define RADEON_HOST_DATA_SWAP_HDW (3 << 0) ++#define RADEON_RBBM_SOFT_RESET 0x00f0 ++# define RADEON_SOFT_RESET_CP (1 << 0) ++# define RADEON_SOFT_RESET_HI (1 << 1) ++# define RADEON_SOFT_RESET_SE (1 << 2) ++# define RADEON_SOFT_RESET_RE (1 << 3) ++# define RADEON_SOFT_RESET_PP (1 << 4) ++# define RADEON_SOFT_RESET_E2 (1 << 5) ++# define RADEON_SOFT_RESET_RB (1 << 6) ++# define RADEON_SOFT_RESET_HDP (1 << 7) ++#define RADEON_RBBM_STATUS 0x0e40 ++# define RADEON_RBBM_FIFOCNT_MASK 0x007f ++# define RADEON_RBBM_ACTIVE (1 << 31) ++#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c ++# define RADEON_RB2D_DC_FLUSH (3 << 0) ++# define RADEON_RB2D_DC_FREE (3 << 2) ++# define RADEON_RB2D_DC_FLUSH_ALL 0xf ++# define RADEON_RB2D_DC_BUSY (1 << 31) ++#define RADEON_RB2D_DSTCACHE_MODE 0x3428 ++#define RADEON_DSTCACHE_CTLSTAT 0x1714 ++ ++#define RADEON_RB3D_ZCACHE_MODE 0x3250 ++#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254 ++# define RADEON_RB3D_ZC_FLUSH_ALL 0x5 ++#define RADEON_RB3D_DSTCACHE_MODE 0x3258 ++# define RADEON_RB3D_DC_CACHE_ENABLE (0) ++# define RADEON_RB3D_DC_2D_CACHE_DISABLE (1) ++# define RADEON_RB3D_DC_3D_CACHE_DISABLE (2) ++# define RADEON_RB3D_DC_CACHE_DISABLE (3) ++# define RADEON_RB3D_DC_2D_CACHE_LINESIZE_128 (1 << 2) ++# define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128 (2 << 2) ++# define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH (1 << 8) ++# define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH (2 << 8) ++# define R200_RB3D_DC_2D_CACHE_AUTOFREE (1 << 10) ++# define R200_RB3D_DC_3D_CACHE_AUTOFREE (2 << 10) ++# define RADEON_RB3D_DC_FORCE_RMW (1 << 16) ++# define RADEON_RB3D_DC_DISABLE_RI_FILL (1 << 24) ++# define 
RADEON_RB3D_DC_DISABLE_RI_READ (1 << 25) ++ ++#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325C ++# define RADEON_RB3D_DC_FLUSH (3 << 0) ++# define RADEON_RB3D_DC_FREE (3 << 2) ++# define RADEON_RB3D_DC_FLUSH_ALL 0xf ++# define RADEON_RB3D_DC_BUSY (1 << 31) ++ ++#define RADEON_REG_BASE 0x0f18 /* PCI */ ++#define RADEON_REGPROG_INF 0x0f09 /* PCI */ ++#define RADEON_REVISION_ID 0x0f08 /* PCI */ ++ ++#define RADEON_SC_BOTTOM 0x164c ++#define RADEON_SC_BOTTOM_RIGHT 0x16f0 ++#define RADEON_SC_BOTTOM_RIGHT_C 0x1c8c ++#define RADEON_SC_LEFT 0x1640 ++#define RADEON_SC_RIGHT 0x1644 ++#define RADEON_SC_TOP 0x1648 ++#define RADEON_SC_TOP_LEFT 0x16ec ++#define RADEON_SC_TOP_LEFT_C 0x1c88 ++# define RADEON_SC_SIGN_MASK_LO 0x8000 ++# define RADEON_SC_SIGN_MASK_HI 0x80000000 ++#define RADEON_M_SPLL_REF_FB_DIV 0x000a /* PLL */ ++# define RADEON_M_SPLL_REF_DIV_SHIFT 0 ++# define RADEON_M_SPLL_REF_DIV_MASK 0xff ++# define RADEON_MPLL_FB_DIV_SHIFT 8 ++# define RADEON_MPLL_FB_DIV_MASK 0xff ++# define RADEON_SPLL_FB_DIV_SHIFT 16 ++# define RADEON_SPLL_FB_DIV_MASK 0xff ++#define RADEON_SCLK_CNTL 0x000d /* PLL */ ++# define RADEON_SCLK_SRC_SEL_MASK 0x0007 ++# define RADEON_DYN_STOP_LAT_MASK 0x00007ff8 ++# define RADEON_CP_MAX_DYN_STOP_LAT 0x0008 ++# define RADEON_SCLK_FORCEON_MASK 0xffff8000 ++# define RADEON_SCLK_FORCE_DISP2 (1<<15) ++# define RADEON_SCLK_FORCE_CP (1<<16) ++# define RADEON_SCLK_FORCE_HDP (1<<17) ++# define RADEON_SCLK_FORCE_DISP1 (1<<18) ++# define RADEON_SCLK_FORCE_TOP (1<<19) ++# define RADEON_SCLK_FORCE_E2 (1<<20) ++# define RADEON_SCLK_FORCE_SE (1<<21) ++# define RADEON_SCLK_FORCE_IDCT (1<<22) ++# define RADEON_SCLK_FORCE_VIP (1<<23) ++# define RADEON_SCLK_FORCE_RE (1<<24) ++# define RADEON_SCLK_FORCE_PB (1<<25) ++# define RADEON_SCLK_FORCE_TAM (1<<26) ++# define RADEON_SCLK_FORCE_TDM (1<<27) ++# define RADEON_SCLK_FORCE_RB (1<<28) ++# define RADEON_SCLK_FORCE_TV_SCLK (1<<29) ++# define RADEON_SCLK_FORCE_SUBPIC (1<<30) ++# define RADEON_SCLK_FORCE_OV0 (1<<31) ++# define 
R300_SCLK_FORCE_VAP (1<<21) ++# define R300_SCLK_FORCE_SR (1<<25) ++# define R300_SCLK_FORCE_PX (1<<26) ++# define R300_SCLK_FORCE_TX (1<<27) ++# define R300_SCLK_FORCE_US (1<<28) ++# define R300_SCLK_FORCE_SU (1<<30) ++#define R300_SCLK_CNTL2 0x1e /* PLL */ ++# define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10) ++# define R300_SCLK_GA_MAX_DYN_STOP_LAT (1<<11) ++# define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12) ++# define R300_SCLK_FORCE_TCL (1<<13) ++# define R300_SCLK_FORCE_CBA (1<<14) ++# define R300_SCLK_FORCE_GA (1<<15) ++#define RADEON_SCLK_MORE_CNTL 0x0035 /* PLL */ ++# define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007 ++# define RADEON_SCLK_MORE_FORCEON 0x0700 ++#define RADEON_SDRAM_MODE_REG 0x0158 ++#define RADEON_SEQ8_DATA 0x03c5 /* VGA */ ++#define RADEON_SEQ8_IDX 0x03c4 /* VGA */ ++#define RADEON_SNAPSHOT_F_COUNT 0x0244 ++#define RADEON_SNAPSHOT_VH_COUNTS 0x0240 ++#define RADEON_SNAPSHOT_VIF_COUNT 0x024c ++#define RADEON_SRC_OFFSET 0x15ac ++#define RADEON_SRC_PITCH 0x15b0 ++#define RADEON_SRC_PITCH_OFFSET 0x1428 ++#define RADEON_SRC_SC_BOTTOM 0x165c ++#define RADEON_SRC_SC_BOTTOM_RIGHT 0x16f4 ++#define RADEON_SRC_SC_RIGHT 0x1654 ++#define RADEON_SRC_X 0x1414 ++#define RADEON_SRC_X_Y 0x1590 ++#define RADEON_SRC_Y 0x1418 ++#define RADEON_SRC_Y_X 0x1434 ++#define RADEON_STATUS 0x0f06 /* PCI */ ++#define RADEON_SUBPIC_CNTL 0x0540 /* ? 
*/ ++#define RADEON_SUB_CLASS 0x0f0a /* PCI */ ++#define RADEON_SURFACE_CNTL 0x0b00 ++# define RADEON_SURF_TRANSLATION_DIS (1 << 8) ++# define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20) ++# define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21) ++# define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22) ++# define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23) ++#define RADEON_SURFACE0_INFO 0x0b0c ++# define RADEON_SURF_TILE_COLOR_MACRO (0 << 16) ++# define RADEON_SURF_TILE_COLOR_BOTH (1 << 16) ++# define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16) ++# define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16) ++# define R200_SURF_TILE_NONE (0 << 16) ++# define R200_SURF_TILE_COLOR_MACRO (1 << 16) ++# define R200_SURF_TILE_COLOR_MICRO (2 << 16) ++# define R200_SURF_TILE_COLOR_BOTH (3 << 16) ++# define R200_SURF_TILE_DEPTH_32BPP (4 << 16) ++# define R200_SURF_TILE_DEPTH_16BPP (5 << 16) ++# define R300_SURF_TILE_NONE (0 << 16) ++# define R300_SURF_TILE_COLOR_MACRO (1 << 16) ++# define R300_SURF_TILE_DEPTH_32BPP (2 << 16) ++# define RADEON_SURF_AP0_SWP_16BPP (1 << 20) ++# define RADEON_SURF_AP0_SWP_32BPP (1 << 21) ++# define RADEON_SURF_AP1_SWP_16BPP (1 << 22) ++# define RADEON_SURF_AP1_SWP_32BPP (1 << 23) ++#define RADEON_SURFACE0_LOWER_BOUND 0x0b04 ++#define RADEON_SURFACE0_UPPER_BOUND 0x0b08 ++#define RADEON_SURFACE1_INFO 0x0b1c ++#define RADEON_SURFACE1_LOWER_BOUND 0x0b14 ++#define RADEON_SURFACE1_UPPER_BOUND 0x0b18 ++#define RADEON_SURFACE2_INFO 0x0b2c ++#define RADEON_SURFACE2_LOWER_BOUND 0x0b24 ++#define RADEON_SURFACE2_UPPER_BOUND 0x0b28 ++#define RADEON_SURFACE3_INFO 0x0b3c ++#define RADEON_SURFACE3_LOWER_BOUND 0x0b34 ++#define RADEON_SURFACE3_UPPER_BOUND 0x0b38 ++#define RADEON_SURFACE4_INFO 0x0b4c ++#define RADEON_SURFACE4_LOWER_BOUND 0x0b44 ++#define RADEON_SURFACE4_UPPER_BOUND 0x0b48 ++#define RADEON_SURFACE5_INFO 0x0b5c ++#define RADEON_SURFACE5_LOWER_BOUND 0x0b54 ++#define RADEON_SURFACE5_UPPER_BOUND 0x0b58 ++#define RADEON_SURFACE6_INFO 0x0b6c ++#define RADEON_SURFACE6_LOWER_BOUND 0x0b64 ++#define 
RADEON_SURFACE6_UPPER_BOUND 0x0b68 ++#define RADEON_SURFACE7_INFO 0x0b7c ++#define RADEON_SURFACE7_LOWER_BOUND 0x0b74 ++#define RADEON_SURFACE7_UPPER_BOUND 0x0b78 ++#define RADEON_SW_SEMAPHORE 0x013c ++ ++#define RADEON_TEST_DEBUG_CNTL 0x0120 ++#define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001 ++ ++#define RADEON_TEST_DEBUG_MUX 0x0124 ++#define RADEON_TEST_DEBUG_OUT 0x012c ++#define RADEON_TMDS_PLL_CNTL 0x02a8 ++#define RADEON_TMDS_TRANSMITTER_CNTL 0x02a4 ++# define RADEON_TMDS_TRANSMITTER_PLLEN 1 ++# define RADEON_TMDS_TRANSMITTER_PLLRST 2 ++#define RADEON_TRAIL_BRES_DEC 0x1614 ++#define RADEON_TRAIL_BRES_ERR 0x160c ++#define RADEON_TRAIL_BRES_INC 0x1610 ++#define RADEON_TRAIL_X 0x1618 ++#define RADEON_TRAIL_X_SUB 0x1620 ++ ++#define RADEON_VCLK_ECP_CNTL 0x0008 /* PLL */ ++# define RADEON_VCLK_SRC_SEL_MASK 0x03 ++# define RADEON_VCLK_SRC_SEL_CPUCLK 0x00 ++# define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01 ++# define RADEON_VCLK_SRC_SEL_BYTECLK 0x02 ++# define RADEON_VCLK_SRC_SEL_PPLLCLK 0x03 ++# define RADEON_PIXCLK_ALWAYS_ONb (1<<6) ++# define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7) ++# define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23) ++ ++#define RADEON_VENDOR_ID 0x0f00 /* PCI */ ++#define RADEON_VGA_DDA_CONFIG 0x02e8 ++#define RADEON_VGA_DDA_ON_OFF 0x02ec ++#define RADEON_VID_BUFFER_CONTROL 0x0900 ++#define RADEON_VIDEOMUX_CNTL 0x0190 ++ ++ /* VIP bus */ ++#define RADEON_VIPH_CH0_DATA 0x0c00 ++#define RADEON_VIPH_CH1_DATA 0x0c04 ++#define RADEON_VIPH_CH2_DATA 0x0c08 ++#define RADEON_VIPH_CH3_DATA 0x0c0c ++#define RADEON_VIPH_CH0_ADDR 0x0c10 ++#define RADEON_VIPH_CH1_ADDR 0x0c14 ++#define RADEON_VIPH_CH2_ADDR 0x0c18 ++#define RADEON_VIPH_CH3_ADDR 0x0c1c ++#define RADEON_VIPH_CH0_SBCNT 0x0c20 ++#define RADEON_VIPH_CH1_SBCNT 0x0c24 ++#define RADEON_VIPH_CH2_SBCNT 0x0c28 ++#define RADEON_VIPH_CH3_SBCNT 0x0c2c ++#define RADEON_VIPH_CH0_ABCNT 0x0c30 ++#define RADEON_VIPH_CH1_ABCNT 0x0c34 ++#define RADEON_VIPH_CH2_ABCNT 0x0c38 ++#define RADEON_VIPH_CH3_ABCNT 0x0c3c 
++#define RADEON_VIPH_CONTROL 0x0c40 ++# define RADEON_VIP_BUSY 0 ++# define RADEON_VIP_IDLE 1 ++# define RADEON_VIP_RESET 2 ++# define RADEON_VIPH_EN (1 << 21) ++#define RADEON_VIPH_DV_LAT 0x0c44 ++#define RADEON_VIPH_BM_CHUNK 0x0c48 ++#define RADEON_VIPH_DV_INT 0x0c4c ++#define RADEON_VIPH_TIMEOUT_STAT 0x0c50 ++#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010 ++#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK 0x00000010 ++#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000 ++ ++#define RADEON_VIPH_REG_DATA 0x0084 ++#define RADEON_VIPH_REG_ADDR 0x0080 ++ ++ ++#define RADEON_WAIT_UNTIL 0x1720 ++# define RADEON_WAIT_CRTC_PFLIP (1 << 0) ++# define RADEON_WAIT_RE_CRTC_VLINE (1 << 1) ++# define RADEON_WAIT_FE_CRTC_VLINE (1 << 2) ++# define RADEON_WAIT_CRTC_VLINE (1 << 3) ++# define RADEON_WAIT_DMA_VID_IDLE (1 << 8) ++# define RADEON_WAIT_DMA_GUI_IDLE (1 << 9) ++# define RADEON_WAIT_CMDFIFO (1 << 10) /* wait for CMDFIFO_ENTRIES */ ++# define RADEON_WAIT_OV0_FLIP (1 << 11) ++# define RADEON_WAIT_AGP_FLUSH (1 << 13) ++# define RADEON_WAIT_2D_IDLE (1 << 14) ++# define RADEON_WAIT_3D_IDLE (1 << 15) ++# define RADEON_WAIT_2D_IDLECLEAN (1 << 16) ++# define RADEON_WAIT_3D_IDLECLEAN (1 << 17) ++# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) ++# define RADEON_CMDFIFO_ENTRIES_SHIFT 10 ++# define RADEON_CMDFIFO_ENTRIES_MASK 0x7f ++# define RADEON_WAIT_VAP_IDLE (1 << 28) ++# define RADEON_WAIT_BOTH_CRTC_PFLIP (1 << 30) ++# define RADEON_ENG_DISPLAY_SELECT_CRTC0 (0 << 31) ++# define RADEON_ENG_DISPLAY_SELECT_CRTC1 (1 << 31) ++ ++#define RADEON_X_MPLL_REF_FB_DIV 0x000a /* PLL */ ++#define RADEON_XCLK_CNTL 0x000d /* PLL */ ++#define RADEON_XDLL_CNTL 0x000c /* PLL */ ++#define RADEON_XPLL_CNTL 0x000b /* PLL */ ++ ++ ++ ++ /* Registers for 3D/TCL */ ++#define RADEON_PP_BORDER_COLOR_0 0x1d40 ++#define RADEON_PP_BORDER_COLOR_1 0x1d44 ++#define RADEON_PP_BORDER_COLOR_2 0x1d48 ++#define RADEON_PP_CNTL 0x1c38 ++# define RADEON_STIPPLE_ENABLE (1 << 0) ++# define 
RADEON_SCISSOR_ENABLE (1 << 1) ++# define RADEON_PATTERN_ENABLE (1 << 2) ++# define RADEON_SHADOW_ENABLE (1 << 3) ++# define RADEON_TEX_ENABLE_MASK (0xf << 4) ++# define RADEON_TEX_0_ENABLE (1 << 4) ++# define RADEON_TEX_1_ENABLE (1 << 5) ++# define RADEON_TEX_2_ENABLE (1 << 6) ++# define RADEON_TEX_3_ENABLE (1 << 7) ++# define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12) ++# define RADEON_TEX_BLEND_0_ENABLE (1 << 12) ++# define RADEON_TEX_BLEND_1_ENABLE (1 << 13) ++# define RADEON_TEX_BLEND_2_ENABLE (1 << 14) ++# define RADEON_TEX_BLEND_3_ENABLE (1 << 15) ++# define RADEON_PLANAR_YUV_ENABLE (1 << 20) ++# define RADEON_SPECULAR_ENABLE (1 << 21) ++# define RADEON_FOG_ENABLE (1 << 22) ++# define RADEON_ALPHA_TEST_ENABLE (1 << 23) ++# define RADEON_ANTI_ALIAS_NONE (0 << 24) ++# define RADEON_ANTI_ALIAS_LINE (1 << 24) ++# define RADEON_ANTI_ALIAS_POLY (2 << 24) ++# define RADEON_ANTI_ALIAS_LINE_POLY (3 << 24) ++# define RADEON_BUMP_MAP_ENABLE (1 << 26) ++# define RADEON_BUMPED_MAP_T0 (0 << 27) ++# define RADEON_BUMPED_MAP_T1 (1 << 27) ++# define RADEON_BUMPED_MAP_T2 (2 << 27) ++# define RADEON_TEX_3D_ENABLE_0 (1 << 29) ++# define RADEON_TEX_3D_ENABLE_1 (1 << 30) ++# define RADEON_MC_ENABLE (1 << 31) ++#define RADEON_PP_FOG_COLOR 0x1c18 ++# define RADEON_FOG_COLOR_MASK 0x00ffffff ++# define RADEON_FOG_VERTEX (0 << 24) ++# define RADEON_FOG_TABLE (1 << 24) ++# define RADEON_FOG_USE_DEPTH (0 << 25) ++# define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25) ++# define RADEON_FOG_USE_SPEC_ALPHA (3 << 25) ++#define RADEON_PP_LUM_MATRIX 0x1d00 ++#define RADEON_PP_MISC 0x1c14 ++# define RADEON_REF_ALPHA_MASK 0x000000ff ++# define RADEON_ALPHA_TEST_FAIL (0 << 8) ++# define RADEON_ALPHA_TEST_LESS (1 << 8) ++# define RADEON_ALPHA_TEST_LEQUAL (2 << 8) ++# define RADEON_ALPHA_TEST_EQUAL (3 << 8) ++# define RADEON_ALPHA_TEST_GEQUAL (4 << 8) ++# define RADEON_ALPHA_TEST_GREATER (5 << 8) ++# define RADEON_ALPHA_TEST_NEQUAL (6 << 8) ++# define RADEON_ALPHA_TEST_PASS (7 << 8) ++# define 
RADEON_ALPHA_TEST_OP_MASK (7 << 8) ++# define RADEON_CHROMA_FUNC_FAIL (0 << 16) ++# define RADEON_CHROMA_FUNC_PASS (1 << 16) ++# define RADEON_CHROMA_FUNC_NEQUAL (2 << 16) ++# define RADEON_CHROMA_FUNC_EQUAL (3 << 16) ++# define RADEON_CHROMA_KEY_NEAREST (0 << 18) ++# define RADEON_CHROMA_KEY_ZERO (1 << 18) ++# define RADEON_SHADOW_ID_AUTO_INC (1 << 20) ++# define RADEON_SHADOW_FUNC_EQUAL (0 << 21) ++# define RADEON_SHADOW_FUNC_NEQUAL (1 << 21) ++# define RADEON_SHADOW_PASS_1 (0 << 22) ++# define RADEON_SHADOW_PASS_2 (1 << 22) ++# define RADEON_RIGHT_HAND_CUBE_D3D (0 << 24) ++# define RADEON_RIGHT_HAND_CUBE_OGL (1 << 24) ++#define RADEON_PP_ROT_MATRIX_0 0x1d58 ++#define RADEON_PP_ROT_MATRIX_1 0x1d5c ++#define RADEON_PP_TXFILTER_0 0x1c54 ++#define RADEON_PP_TXFILTER_1 0x1c6c ++#define RADEON_PP_TXFILTER_2 0x1c84 ++# define RADEON_MAG_FILTER_NEAREST (0 << 0) ++# define RADEON_MAG_FILTER_LINEAR (1 << 0) ++# define RADEON_MAG_FILTER_MASK (1 << 0) ++# define RADEON_MIN_FILTER_NEAREST (0 << 1) ++# define RADEON_MIN_FILTER_LINEAR (1 << 1) ++# define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1) ++# define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1) ++# define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1) ++# define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1) ++# define RADEON_MIN_FILTER_ANISO_NEAREST (8 << 1) ++# define RADEON_MIN_FILTER_ANISO_LINEAR (9 << 1) ++# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1) ++# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1) ++# define RADEON_MIN_FILTER_MASK (15 << 1) ++# define RADEON_MAX_ANISO_1_TO_1 (0 << 5) ++# define RADEON_MAX_ANISO_2_TO_1 (1 << 5) ++# define RADEON_MAX_ANISO_4_TO_1 (2 << 5) ++# define RADEON_MAX_ANISO_8_TO_1 (3 << 5) ++# define RADEON_MAX_ANISO_16_TO_1 (4 << 5) ++# define RADEON_MAX_ANISO_MASK (7 << 5) ++# define RADEON_LOD_BIAS_MASK (0xff << 8) ++# define RADEON_LOD_BIAS_SHIFT 8 ++# define RADEON_MAX_MIP_LEVEL_MASK (0x0f << 16) ++# define RADEON_MAX_MIP_LEVEL_SHIFT 16 ++# define 
RADEON_YUV_TO_RGB (1 << 20) ++# define RADEON_YUV_TEMPERATURE_COOL (0 << 21) ++# define RADEON_YUV_TEMPERATURE_HOT (1 << 21) ++# define RADEON_YUV_TEMPERATURE_MASK (1 << 21) ++# define RADEON_WRAPEN_S (1 << 22) ++# define RADEON_CLAMP_S_WRAP (0 << 23) ++# define RADEON_CLAMP_S_MIRROR (1 << 23) ++# define RADEON_CLAMP_S_CLAMP_LAST (2 << 23) ++# define RADEON_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23) ++# define RADEON_CLAMP_S_CLAMP_BORDER (4 << 23) ++# define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23) ++# define RADEON_CLAMP_S_CLAMP_GL (6 << 23) ++# define RADEON_CLAMP_S_MIRROR_CLAMP_GL (7 << 23) ++# define RADEON_CLAMP_S_MASK (7 << 23) ++# define RADEON_WRAPEN_T (1 << 26) ++# define RADEON_CLAMP_T_WRAP (0 << 27) ++# define RADEON_CLAMP_T_MIRROR (1 << 27) ++# define RADEON_CLAMP_T_CLAMP_LAST (2 << 27) ++# define RADEON_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27) ++# define RADEON_CLAMP_T_CLAMP_BORDER (4 << 27) ++# define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27) ++# define RADEON_CLAMP_T_CLAMP_GL (6 << 27) ++# define RADEON_CLAMP_T_MIRROR_CLAMP_GL (7 << 27) ++# define RADEON_CLAMP_T_MASK (7 << 27) ++# define RADEON_BORDER_MODE_OGL (0 << 31) ++# define RADEON_BORDER_MODE_D3D (1 << 31) ++#define RADEON_PP_TXFORMAT_0 0x1c58 ++#define RADEON_PP_TXFORMAT_1 0x1c70 ++#define RADEON_PP_TXFORMAT_2 0x1c88 ++# define RADEON_TXFORMAT_I8 (0 << 0) ++# define RADEON_TXFORMAT_AI88 (1 << 0) ++# define RADEON_TXFORMAT_RGB332 (2 << 0) ++# define RADEON_TXFORMAT_ARGB1555 (3 << 0) ++# define RADEON_TXFORMAT_RGB565 (4 << 0) ++# define RADEON_TXFORMAT_ARGB4444 (5 << 0) ++# define RADEON_TXFORMAT_ARGB8888 (6 << 0) ++# define RADEON_TXFORMAT_RGBA8888 (7 << 0) ++# define RADEON_TXFORMAT_Y8 (8 << 0) ++# define RADEON_TXFORMAT_VYUY422 (10 << 0) ++# define RADEON_TXFORMAT_YVYU422 (11 << 0) ++# define RADEON_TXFORMAT_DXT1 (12 << 0) ++# define RADEON_TXFORMAT_DXT23 (14 << 0) ++# define RADEON_TXFORMAT_DXT45 (15 << 0) ++# define RADEON_TXFORMAT_FORMAT_MASK (31 << 0) ++# define RADEON_TXFORMAT_FORMAT_SHIFT 0 
++# define RADEON_TXFORMAT_APPLE_YUV_MODE (1 << 5) ++# define RADEON_TXFORMAT_ALPHA_IN_MAP (1 << 6) ++# define RADEON_TXFORMAT_NON_POWER2 (1 << 7) ++# define RADEON_TXFORMAT_WIDTH_MASK (15 << 8) ++# define RADEON_TXFORMAT_WIDTH_SHIFT 8 ++# define RADEON_TXFORMAT_HEIGHT_MASK (15 << 12) ++# define RADEON_TXFORMAT_HEIGHT_SHIFT 12 ++# define RADEON_TXFORMAT_F5_WIDTH_MASK (15 << 16) ++# define RADEON_TXFORMAT_F5_WIDTH_SHIFT 16 ++# define RADEON_TXFORMAT_F5_HEIGHT_MASK (15 << 20) ++# define RADEON_TXFORMAT_F5_HEIGHT_SHIFT 20 ++# define RADEON_TXFORMAT_ST_ROUTE_STQ0 (0 << 24) ++# define RADEON_TXFORMAT_ST_ROUTE_MASK (3 << 24) ++# define RADEON_TXFORMAT_ST_ROUTE_STQ1 (1 << 24) ++# define RADEON_TXFORMAT_ST_ROUTE_STQ2 (2 << 24) ++# define RADEON_TXFORMAT_ENDIAN_NO_SWAP (0 << 26) ++# define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP (1 << 26) ++# define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP (2 << 26) ++# define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3 << 26) ++# define RADEON_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28) ++# define RADEON_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29) ++# define RADEON_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30) ++# define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1 << 31) ++#define RADEON_PP_CUBIC_FACES_0 0x1d24 ++#define RADEON_PP_CUBIC_FACES_1 0x1d28 ++#define RADEON_PP_CUBIC_FACES_2 0x1d2c ++# define RADEON_FACE_WIDTH_1_SHIFT 0 ++# define RADEON_FACE_HEIGHT_1_SHIFT 4 ++# define RADEON_FACE_WIDTH_1_MASK (0xf << 0) ++# define RADEON_FACE_HEIGHT_1_MASK (0xf << 4) ++# define RADEON_FACE_WIDTH_2_SHIFT 8 ++# define RADEON_FACE_HEIGHT_2_SHIFT 12 ++# define RADEON_FACE_WIDTH_2_MASK (0xf << 8) ++# define RADEON_FACE_HEIGHT_2_MASK (0xf << 12) ++# define RADEON_FACE_WIDTH_3_SHIFT 16 ++# define RADEON_FACE_HEIGHT_3_SHIFT 20 ++# define RADEON_FACE_WIDTH_3_MASK (0xf << 16) ++# define RADEON_FACE_HEIGHT_3_MASK (0xf << 20) ++# define RADEON_FACE_WIDTH_4_SHIFT 24 ++# define RADEON_FACE_HEIGHT_4_SHIFT 28 ++# define RADEON_FACE_WIDTH_4_MASK (0xf << 24) ++# define RADEON_FACE_HEIGHT_4_MASK (0xf << 28) ++ 
++#define RADEON_PP_TXOFFSET_0 0x1c5c ++#define RADEON_PP_TXOFFSET_1 0x1c74 ++#define RADEON_PP_TXOFFSET_2 0x1c8c ++# define RADEON_TXO_ENDIAN_NO_SWAP (0 << 0) ++# define RADEON_TXO_ENDIAN_BYTE_SWAP (1 << 0) ++# define RADEON_TXO_ENDIAN_WORD_SWAP (2 << 0) ++# define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0) ++# define RADEON_TXO_MACRO_LINEAR (0 << 2) ++# define RADEON_TXO_MACRO_TILE (1 << 2) ++# define RADEON_TXO_MICRO_LINEAR (0 << 3) ++# define RADEON_TXO_MICRO_TILE_X2 (1 << 3) ++# define RADEON_TXO_MICRO_TILE_OPT (2 << 3) ++# define RADEON_TXO_OFFSET_MASK 0xffffffe0 ++# define RADEON_TXO_OFFSET_SHIFT 5 ++ ++#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */ ++#define RADEON_PP_CUBIC_OFFSET_T0_1 0x1dd4 ++#define RADEON_PP_CUBIC_OFFSET_T0_2 0x1dd8 ++#define RADEON_PP_CUBIC_OFFSET_T0_3 0x1ddc ++#define RADEON_PP_CUBIC_OFFSET_T0_4 0x1de0 ++#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00 ++#define RADEON_PP_CUBIC_OFFSET_T1_1 0x1e04 ++#define RADEON_PP_CUBIC_OFFSET_T1_2 0x1e08 ++#define RADEON_PP_CUBIC_OFFSET_T1_3 0x1e0c ++#define RADEON_PP_CUBIC_OFFSET_T1_4 0x1e10 ++#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14 ++#define RADEON_PP_CUBIC_OFFSET_T2_1 0x1e18 ++#define RADEON_PP_CUBIC_OFFSET_T2_2 0x1e1c ++#define RADEON_PP_CUBIC_OFFSET_T2_3 0x1e20 ++#define RADEON_PP_CUBIC_OFFSET_T2_4 0x1e24 ++ ++#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */ ++#define RADEON_PP_TEX_SIZE_1 0x1d0c ++#define RADEON_PP_TEX_SIZE_2 0x1d14 ++# define RADEON_TEX_USIZE_MASK (0x7ff << 0) ++# define RADEON_TEX_USIZE_SHIFT 0 ++# define RADEON_TEX_VSIZE_MASK (0x7ff << 16) ++# define RADEON_TEX_VSIZE_SHIFT 16 ++# define RADEON_SIGNED_RGB_MASK (1 << 30) ++# define RADEON_SIGNED_RGB_SHIFT 30 ++# define RADEON_SIGNED_ALPHA_MASK (1 << 31) ++# define RADEON_SIGNED_ALPHA_SHIFT 31 ++#define RADEON_PP_TEX_PITCH_0 0x1d08 /* NPOT */ ++#define RADEON_PP_TEX_PITCH_1 0x1d10 /* NPOT */ ++#define RADEON_PP_TEX_PITCH_2 0x1d18 /* NPOT */ ++/* note: bits 13-5: 32 byte aligned stride of texture map */ ++ ++#define 
RADEON_PP_TXCBLEND_0 0x1c60 ++#define RADEON_PP_TXCBLEND_1 0x1c78 ++#define RADEON_PP_TXCBLEND_2 0x1c90 ++# define RADEON_COLOR_ARG_A_SHIFT 0 ++# define RADEON_COLOR_ARG_A_MASK (0x1f << 0) ++# define RADEON_COLOR_ARG_A_ZERO (0 << 0) ++# define RADEON_COLOR_ARG_A_CURRENT_COLOR (2 << 0) ++# define RADEON_COLOR_ARG_A_CURRENT_ALPHA (3 << 0) ++# define RADEON_COLOR_ARG_A_DIFFUSE_COLOR (4 << 0) ++# define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA (5 << 0) ++# define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6 << 0) ++# define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7 << 0) ++# define RADEON_COLOR_ARG_A_TFACTOR_COLOR (8 << 0) ++# define RADEON_COLOR_ARG_A_TFACTOR_ALPHA (9 << 0) ++# define RADEON_COLOR_ARG_A_T0_COLOR (10 << 0) ++# define RADEON_COLOR_ARG_A_T0_ALPHA (11 << 0) ++# define RADEON_COLOR_ARG_A_T1_COLOR (12 << 0) ++# define RADEON_COLOR_ARG_A_T1_ALPHA (13 << 0) ++# define RADEON_COLOR_ARG_A_T2_COLOR (14 << 0) ++# define RADEON_COLOR_ARG_A_T2_ALPHA (15 << 0) ++# define RADEON_COLOR_ARG_A_T3_COLOR (16 << 0) ++# define RADEON_COLOR_ARG_A_T3_ALPHA (17 << 0) ++# define RADEON_COLOR_ARG_B_SHIFT 5 ++# define RADEON_COLOR_ARG_B_MASK (0x1f << 5) ++# define RADEON_COLOR_ARG_B_ZERO (0 << 5) ++# define RADEON_COLOR_ARG_B_CURRENT_COLOR (2 << 5) ++# define RADEON_COLOR_ARG_B_CURRENT_ALPHA (3 << 5) ++# define RADEON_COLOR_ARG_B_DIFFUSE_COLOR (4 << 5) ++# define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA (5 << 5) ++# define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6 << 5) ++# define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7 << 5) ++# define RADEON_COLOR_ARG_B_TFACTOR_COLOR (8 << 5) ++# define RADEON_COLOR_ARG_B_TFACTOR_ALPHA (9 << 5) ++# define RADEON_COLOR_ARG_B_T0_COLOR (10 << 5) ++# define RADEON_COLOR_ARG_B_T0_ALPHA (11 << 5) ++# define RADEON_COLOR_ARG_B_T1_COLOR (12 << 5) ++# define RADEON_COLOR_ARG_B_T1_ALPHA (13 << 5) ++# define RADEON_COLOR_ARG_B_T2_COLOR (14 << 5) ++# define RADEON_COLOR_ARG_B_T2_ALPHA (15 << 5) ++# define RADEON_COLOR_ARG_B_T3_COLOR (16 << 5) ++# define RADEON_COLOR_ARG_B_T3_ALPHA (17 << 5) 
++# define RADEON_COLOR_ARG_C_SHIFT 10 ++# define RADEON_COLOR_ARG_C_MASK (0x1f << 10) ++# define RADEON_COLOR_ARG_C_ZERO (0 << 10) ++# define RADEON_COLOR_ARG_C_CURRENT_COLOR (2 << 10) ++# define RADEON_COLOR_ARG_C_CURRENT_ALPHA (3 << 10) ++# define RADEON_COLOR_ARG_C_DIFFUSE_COLOR (4 << 10) ++# define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA (5 << 10) ++# define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6 << 10) ++# define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7 << 10) ++# define RADEON_COLOR_ARG_C_TFACTOR_COLOR (8 << 10) ++# define RADEON_COLOR_ARG_C_TFACTOR_ALPHA (9 << 10) ++# define RADEON_COLOR_ARG_C_T0_COLOR (10 << 10) ++# define RADEON_COLOR_ARG_C_T0_ALPHA (11 << 10) ++# define RADEON_COLOR_ARG_C_T1_COLOR (12 << 10) ++# define RADEON_COLOR_ARG_C_T1_ALPHA (13 << 10) ++# define RADEON_COLOR_ARG_C_T2_COLOR (14 << 10) ++# define RADEON_COLOR_ARG_C_T2_ALPHA (15 << 10) ++# define RADEON_COLOR_ARG_C_T3_COLOR (16 << 10) ++# define RADEON_COLOR_ARG_C_T3_ALPHA (17 << 10) ++# define RADEON_COMP_ARG_A (1 << 15) ++# define RADEON_COMP_ARG_A_SHIFT 15 ++# define RADEON_COMP_ARG_B (1 << 16) ++# define RADEON_COMP_ARG_B_SHIFT 16 ++# define RADEON_COMP_ARG_C (1 << 17) ++# define RADEON_COMP_ARG_C_SHIFT 17 ++# define RADEON_BLEND_CTL_MASK (7 << 18) ++# define RADEON_BLEND_CTL_ADD (0 << 18) ++# define RADEON_BLEND_CTL_SUBTRACT (1 << 18) ++# define RADEON_BLEND_CTL_ADDSIGNED (2 << 18) ++# define RADEON_BLEND_CTL_BLEND (3 << 18) ++# define RADEON_BLEND_CTL_DOT3 (4 << 18) ++# define RADEON_SCALE_SHIFT 21 ++# define RADEON_SCALE_MASK (3 << 21) ++# define RADEON_SCALE_1X (0 << 21) ++# define RADEON_SCALE_2X (1 << 21) ++# define RADEON_SCALE_4X (2 << 21) ++# define RADEON_CLAMP_TX (1 << 23) ++# define RADEON_T0_EQ_TCUR (1 << 24) ++# define RADEON_T1_EQ_TCUR (1 << 25) ++# define RADEON_T2_EQ_TCUR (1 << 26) ++# define RADEON_T3_EQ_TCUR (1 << 27) ++# define RADEON_COLOR_ARG_MASK 0x1f ++# define RADEON_COMP_ARG_SHIFT 15 ++#define RADEON_PP_TXABLEND_0 0x1c64 ++#define RADEON_PP_TXABLEND_1 0x1c7c ++#define 
RADEON_PP_TXABLEND_2 0x1c94 ++# define RADEON_ALPHA_ARG_A_SHIFT 0 ++# define RADEON_ALPHA_ARG_A_MASK (0xf << 0) ++# define RADEON_ALPHA_ARG_A_ZERO (0 << 0) ++# define RADEON_ALPHA_ARG_A_CURRENT_ALPHA (1 << 0) ++# define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA (2 << 0) ++# define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3 << 0) ++# define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA (4 << 0) ++# define RADEON_ALPHA_ARG_A_T0_ALPHA (5 << 0) ++# define RADEON_ALPHA_ARG_A_T1_ALPHA (6 << 0) ++# define RADEON_ALPHA_ARG_A_T2_ALPHA (7 << 0) ++# define RADEON_ALPHA_ARG_A_T3_ALPHA (8 << 0) ++# define RADEON_ALPHA_ARG_B_SHIFT 4 ++# define RADEON_ALPHA_ARG_B_MASK (0xf << 4) ++# define RADEON_ALPHA_ARG_B_ZERO (0 << 4) ++# define RADEON_ALPHA_ARG_B_CURRENT_ALPHA (1 << 4) ++# define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA (2 << 4) ++# define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3 << 4) ++# define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA (4 << 4) ++# define RADEON_ALPHA_ARG_B_T0_ALPHA (5 << 4) ++# define RADEON_ALPHA_ARG_B_T1_ALPHA (6 << 4) ++# define RADEON_ALPHA_ARG_B_T2_ALPHA (7 << 4) ++# define RADEON_ALPHA_ARG_B_T3_ALPHA (8 << 4) ++# define RADEON_ALPHA_ARG_C_SHIFT 8 ++# define RADEON_ALPHA_ARG_C_MASK (0xf << 8) ++# define RADEON_ALPHA_ARG_C_ZERO (0 << 8) ++# define RADEON_ALPHA_ARG_C_CURRENT_ALPHA (1 << 8) ++# define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA (2 << 8) ++# define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3 << 8) ++# define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA (4 << 8) ++# define RADEON_ALPHA_ARG_C_T0_ALPHA (5 << 8) ++# define RADEON_ALPHA_ARG_C_T1_ALPHA (6 << 8) ++# define RADEON_ALPHA_ARG_C_T2_ALPHA (7 << 8) ++# define RADEON_ALPHA_ARG_C_T3_ALPHA (8 << 8) ++# define RADEON_DOT_ALPHA_DONT_REPLICATE (1 << 9) ++# define RADEON_ALPHA_ARG_MASK 0xf ++ ++#define RADEON_PP_TFACTOR_0 0x1c68 ++#define RADEON_PP_TFACTOR_1 0x1c80 ++#define RADEON_PP_TFACTOR_2 0x1c98 ++ ++#define RADEON_RB3D_BLENDCNTL 0x1c20 ++# define RADEON_COMB_FCN_MASK (3 << 12) ++# define RADEON_COMB_FCN_ADD_CLAMP (0 << 12) ++# define RADEON_COMB_FCN_ADD_NOCLAMP 
(1 << 12) ++# define RADEON_COMB_FCN_SUB_CLAMP (2 << 12) ++# define RADEON_COMB_FCN_SUB_NOCLAMP (3 << 12) ++# define RADEON_SRC_BLEND_GL_ZERO (32 << 16) ++# define RADEON_SRC_BLEND_GL_ONE (33 << 16) ++# define RADEON_SRC_BLEND_GL_SRC_COLOR (34 << 16) ++# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16) ++# define RADEON_SRC_BLEND_GL_DST_COLOR (36 << 16) ++# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16) ++# define RADEON_SRC_BLEND_GL_SRC_ALPHA (38 << 16) ++# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16) ++# define RADEON_SRC_BLEND_GL_DST_ALPHA (40 << 16) ++# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16) ++# define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16) ++# define RADEON_SRC_BLEND_MASK (63 << 16) ++# define RADEON_DST_BLEND_GL_ZERO (32 << 24) ++# define RADEON_DST_BLEND_GL_ONE (33 << 24) ++# define RADEON_DST_BLEND_GL_SRC_COLOR (34 << 24) ++# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24) ++# define RADEON_DST_BLEND_GL_DST_COLOR (36 << 24) ++# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24) ++# define RADEON_DST_BLEND_GL_SRC_ALPHA (38 << 24) ++# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24) ++# define RADEON_DST_BLEND_GL_DST_ALPHA (40 << 24) ++# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24) ++# define RADEON_DST_BLEND_MASK (63 << 24) ++#define RADEON_RB3D_CNTL 0x1c3c ++# define RADEON_ALPHA_BLEND_ENABLE (1 << 0) ++# define RADEON_PLANE_MASK_ENABLE (1 << 1) ++# define RADEON_DITHER_ENABLE (1 << 2) ++# define RADEON_ROUND_ENABLE (1 << 3) ++# define RADEON_SCALE_DITHER_ENABLE (1 << 4) ++# define RADEON_DITHER_INIT (1 << 5) ++# define RADEON_ROP_ENABLE (1 << 6) ++# define RADEON_STENCIL_ENABLE (1 << 7) ++# define RADEON_Z_ENABLE (1 << 8) ++# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9) ++# define RADEON_RB3D_COLOR_FORMAT_SHIFT 10 ++ ++# define RADEON_COLOR_FORMAT_ARGB1555 3 ++# define RADEON_COLOR_FORMAT_RGB565 4 ++# define RADEON_COLOR_FORMAT_ARGB8888 6 ++# define 
RADEON_COLOR_FORMAT_RGB332 7 ++# define RADEON_COLOR_FORMAT_Y8 8 ++# define RADEON_COLOR_FORMAT_RGB8 9 ++# define RADEON_COLOR_FORMAT_YUV422_VYUY 11 ++# define RADEON_COLOR_FORMAT_YUV422_YVYU 12 ++# define RADEON_COLOR_FORMAT_aYUV444 14 ++# define RADEON_COLOR_FORMAT_ARGB4444 15 ++ ++# define RADEON_CLRCMP_FLIP_ENABLE (1 << 14) ++#define RADEON_RB3D_COLOROFFSET 0x1c40 ++# define RADEON_COLOROFFSET_MASK 0xfffffff0 ++#define RADEON_RB3D_COLORPITCH 0x1c48 ++# define RADEON_COLORPITCH_MASK 0x000001ff8 ++# define RADEON_COLOR_TILE_ENABLE (1 << 16) ++# define RADEON_COLOR_MICROTILE_ENABLE (1 << 17) ++# define RADEON_COLOR_ENDIAN_NO_SWAP (0 << 18) ++# define RADEON_COLOR_ENDIAN_WORD_SWAP (1 << 18) ++# define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18) ++#define RADEON_RB3D_DEPTHOFFSET 0x1c24 ++#define RADEON_RB3D_DEPTHPITCH 0x1c28 ++# define RADEON_DEPTHPITCH_MASK 0x00001ff8 ++# define RADEON_DEPTH_ENDIAN_NO_SWAP (0 << 18) ++# define RADEON_DEPTH_ENDIAN_WORD_SWAP (1 << 18) ++# define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) ++#define RADEON_RB3D_PLANEMASK 0x1d84 ++#define RADEON_RB3D_ROPCNTL 0x1d80 ++# define RADEON_ROP_MASK (15 << 8) ++# define RADEON_ROP_CLEAR (0 << 8) ++# define RADEON_ROP_NOR (1 << 8) ++# define RADEON_ROP_AND_INVERTED (2 << 8) ++# define RADEON_ROP_COPY_INVERTED (3 << 8) ++# define RADEON_ROP_AND_REVERSE (4 << 8) ++# define RADEON_ROP_INVERT (5 << 8) ++# define RADEON_ROP_XOR (6 << 8) ++# define RADEON_ROP_NAND (7 << 8) ++# define RADEON_ROP_AND (8 << 8) ++# define RADEON_ROP_EQUIV (9 << 8) ++# define RADEON_ROP_NOOP (10 << 8) ++# define RADEON_ROP_OR_INVERTED (11 << 8) ++# define RADEON_ROP_COPY (12 << 8) ++# define RADEON_ROP_OR_REVERSE (13 << 8) ++# define RADEON_ROP_OR (14 << 8) ++# define RADEON_ROP_SET (15 << 8) ++#define RADEON_RB3D_STENCILREFMASK 0x1d7c ++# define RADEON_STENCIL_REF_SHIFT 0 ++# define RADEON_STENCIL_REF_MASK (0xff << 0) ++# define RADEON_STENCIL_MASK_SHIFT 16 ++# define RADEON_STENCIL_VALUE_MASK (0xff << 16) ++# define 
RADEON_STENCIL_WRITEMASK_SHIFT 24 ++# define RADEON_STENCIL_WRITE_MASK (0xff << 24) ++#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c ++# define RADEON_DEPTH_FORMAT_MASK (0xf << 0) ++# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) ++# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) ++# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3 << 0) ++# define RADEON_DEPTH_FORMAT_32BIT_INT_Z (4 << 0) ++# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5 << 0) ++# define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7 << 0) ++# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9 << 0) ++# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 << 0) ++# define RADEON_Z_TEST_NEVER (0 << 4) ++# define RADEON_Z_TEST_LESS (1 << 4) ++# define RADEON_Z_TEST_LEQUAL (2 << 4) ++# define RADEON_Z_TEST_EQUAL (3 << 4) ++# define RADEON_Z_TEST_GEQUAL (4 << 4) ++# define RADEON_Z_TEST_GREATER (5 << 4) ++# define RADEON_Z_TEST_NEQUAL (6 << 4) ++# define RADEON_Z_TEST_ALWAYS (7 << 4) ++# define RADEON_Z_TEST_MASK (7 << 4) ++# define RADEON_STENCIL_TEST_NEVER (0 << 12) ++# define RADEON_STENCIL_TEST_LESS (1 << 12) ++# define RADEON_STENCIL_TEST_LEQUAL (2 << 12) ++# define RADEON_STENCIL_TEST_EQUAL (3 << 12) ++# define RADEON_STENCIL_TEST_GEQUAL (4 << 12) ++# define RADEON_STENCIL_TEST_GREATER (5 << 12) ++# define RADEON_STENCIL_TEST_NEQUAL (6 << 12) ++# define RADEON_STENCIL_TEST_ALWAYS (7 << 12) ++# define RADEON_STENCIL_TEST_MASK (0x7 << 12) ++# define RADEON_STENCIL_FAIL_KEEP (0 << 16) ++# define RADEON_STENCIL_FAIL_ZERO (1 << 16) ++# define RADEON_STENCIL_FAIL_REPLACE (2 << 16) ++# define RADEON_STENCIL_FAIL_INC (3 << 16) ++# define RADEON_STENCIL_FAIL_DEC (4 << 16) ++# define RADEON_STENCIL_FAIL_INVERT (5 << 16) ++# define RADEON_STENCIL_FAIL_MASK (0x7 << 16) ++# define RADEON_STENCIL_ZPASS_KEEP (0 << 20) ++# define RADEON_STENCIL_ZPASS_ZERO (1 << 20) ++# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) ++# define RADEON_STENCIL_ZPASS_INC (3 << 20) ++# define RADEON_STENCIL_ZPASS_DEC (4 << 20) ++# define RADEON_STENCIL_ZPASS_INVERT (5 
<< 20) ++# define RADEON_STENCIL_ZPASS_MASK (0x7 << 20) ++# define RADEON_STENCIL_ZFAIL_KEEP (0 << 24) ++# define RADEON_STENCIL_ZFAIL_ZERO (1 << 24) ++# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) ++# define RADEON_STENCIL_ZFAIL_INC (3 << 24) ++# define RADEON_STENCIL_ZFAIL_DEC (4 << 24) ++# define RADEON_STENCIL_ZFAIL_INVERT (5 << 24) ++# define RADEON_STENCIL_ZFAIL_MASK (0x7 << 24) ++# define RADEON_Z_COMPRESSION_ENABLE (1 << 28) ++# define RADEON_FORCE_Z_DIRTY (1 << 29) ++# define RADEON_Z_WRITE_ENABLE (1 << 30) ++#define RADEON_RE_LINE_PATTERN 0x1cd0 ++# define RADEON_LINE_PATTERN_MASK 0x0000ffff ++# define RADEON_LINE_REPEAT_COUNT_SHIFT 16 ++# define RADEON_LINE_PATTERN_START_SHIFT 24 ++# define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28) ++# define RADEON_LINE_PATTERN_BIG_BIT_ORDER (1 << 28) ++# define RADEON_LINE_PATTERN_AUTO_RESET (1 << 29) ++#define RADEON_RE_LINE_STATE 0x1cd4 ++# define RADEON_LINE_CURRENT_PTR_SHIFT 0 ++# define RADEON_LINE_CURRENT_COUNT_SHIFT 8 ++#define RADEON_RE_MISC 0x26c4 ++# define RADEON_STIPPLE_COORD_MASK 0x1f ++# define RADEON_STIPPLE_X_OFFSET_SHIFT 0 ++# define RADEON_STIPPLE_X_OFFSET_MASK (0x1f << 0) ++# define RADEON_STIPPLE_Y_OFFSET_SHIFT 8 ++# define RADEON_STIPPLE_Y_OFFSET_MASK (0x1f << 8) ++# define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16) ++# define RADEON_STIPPLE_BIG_BIT_ORDER (1 << 16) ++#define RADEON_RE_SOLID_COLOR 0x1c1c ++#define RADEON_RE_TOP_LEFT 0x26c0 ++# define RADEON_RE_LEFT_SHIFT 0 ++# define RADEON_RE_TOP_SHIFT 16 ++#define RADEON_RE_WIDTH_HEIGHT 0x1c44 ++# define RADEON_RE_WIDTH_SHIFT 0 ++# define RADEON_RE_HEIGHT_SHIFT 16 ++ ++#define RADEON_SE_CNTL 0x1c4c ++# define RADEON_FFACE_CULL_CW (0 << 0) ++# define RADEON_FFACE_CULL_CCW (1 << 0) ++# define RADEON_FFACE_CULL_DIR_MASK (1 << 0) ++# define RADEON_BFACE_CULL (0 << 1) ++# define RADEON_BFACE_SOLID (3 << 1) ++# define RADEON_FFACE_CULL (0 << 3) ++# define RADEON_FFACE_SOLID (3 << 3) ++# define RADEON_FFACE_CULL_MASK (3 << 3) ++# define 
RADEON_BADVTX_CULL_DISABLE (1 << 5) ++# define RADEON_FLAT_SHADE_VTX_0 (0 << 6) ++# define RADEON_FLAT_SHADE_VTX_1 (1 << 6) ++# define RADEON_FLAT_SHADE_VTX_2 (2 << 6) ++# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6) ++# define RADEON_DIFFUSE_SHADE_SOLID (0 << 8) ++# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8) ++# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8) ++# define RADEON_DIFFUSE_SHADE_MASK (3 << 8) ++# define RADEON_ALPHA_SHADE_SOLID (0 << 10) ++# define RADEON_ALPHA_SHADE_FLAT (1 << 10) ++# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10) ++# define RADEON_ALPHA_SHADE_MASK (3 << 10) ++# define RADEON_SPECULAR_SHADE_SOLID (0 << 12) ++# define RADEON_SPECULAR_SHADE_FLAT (1 << 12) ++# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12) ++# define RADEON_SPECULAR_SHADE_MASK (3 << 12) ++# define RADEON_FOG_SHADE_SOLID (0 << 14) ++# define RADEON_FOG_SHADE_FLAT (1 << 14) ++# define RADEON_FOG_SHADE_GOURAUD (2 << 14) ++# define RADEON_FOG_SHADE_MASK (3 << 14) ++# define RADEON_ZBIAS_ENABLE_POINT (1 << 16) ++# define RADEON_ZBIAS_ENABLE_LINE (1 << 17) ++# define RADEON_ZBIAS_ENABLE_TRI (1 << 18) ++# define RADEON_WIDELINE_ENABLE (1 << 20) ++# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24) ++# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25) ++# define RADEON_VTX_PIX_CENTER_D3D (0 << 27) ++# define RADEON_VTX_PIX_CENTER_OGL (1 << 27) ++# define RADEON_ROUND_MODE_TRUNC (0 << 28) ++# define RADEON_ROUND_MODE_ROUND (1 << 28) ++# define RADEON_ROUND_MODE_ROUND_EVEN (2 << 28) ++# define RADEON_ROUND_MODE_ROUND_ODD (3 << 28) ++# define RADEON_ROUND_PREC_16TH_PIX (0 << 30) ++# define RADEON_ROUND_PREC_8TH_PIX (1 << 30) ++# define RADEON_ROUND_PREC_4TH_PIX (2 << 30) ++# define RADEON_ROUND_PREC_HALF_PIX (3 << 30) ++#define R200_RE_CNTL 0x1c50 ++# define R200_STIPPLE_ENABLE 0x1 ++# define R200_SCISSOR_ENABLE 0x2 ++# define R200_PATTERN_ENABLE 0x4 ++# define R200_PERSPECTIVE_ENABLE 0x8 ++# define R200_POINT_SMOOTH 0x20 ++# define R200_VTX_STQ0_D3D 0x00010000 ++# define R200_VTX_STQ1_D3D 
0x00040000 ++# define R200_VTX_STQ2_D3D 0x00100000 ++# define R200_VTX_STQ3_D3D 0x00400000 ++# define R200_VTX_STQ4_D3D 0x01000000 ++# define R200_VTX_STQ5_D3D 0x04000000 ++#define RADEON_SE_CNTL_STATUS 0x2140 ++# define RADEON_VC_NO_SWAP (0 << 0) ++# define RADEON_VC_16BIT_SWAP (1 << 0) ++# define RADEON_VC_32BIT_SWAP (2 << 0) ++# define RADEON_VC_HALF_DWORD_SWAP (3 << 0) ++# define RADEON_TCL_BYPASS (1 << 8) ++#define RADEON_SE_COORD_FMT 0x1c50 ++# define RADEON_VTX_XY_PRE_MULT_1_OVER_W0 (1 << 0) ++# define RADEON_VTX_Z_PRE_MULT_1_OVER_W0 (1 << 1) ++# define RADEON_VTX_ST0_NONPARAMETRIC (1 << 8) ++# define RADEON_VTX_ST1_NONPARAMETRIC (1 << 9) ++# define RADEON_VTX_ST2_NONPARAMETRIC (1 << 10) ++# define RADEON_VTX_ST3_NONPARAMETRIC (1 << 11) ++# define RADEON_VTX_W0_NORMALIZE (1 << 12) ++# define RADEON_VTX_W0_IS_NOT_1_OVER_W0 (1 << 16) ++# define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17) ++# define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19) ++# define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21) ++# define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23) ++# define RADEON_TEX1_W_ROUTING_USE_W0 (0 << 26) ++# define RADEON_TEX1_W_ROUTING_USE_Q1 (1 << 26) ++#define RADEON_SE_LINE_WIDTH 0x1db8 ++#define RADEON_SE_TCL_LIGHT_MODEL_CTL 0x226c ++# define RADEON_LIGHTING_ENABLE (1 << 0) ++# define RADEON_LIGHT_IN_MODELSPACE (1 << 1) ++# define RADEON_LOCAL_VIEWER (1 << 2) ++# define RADEON_NORMALIZE_NORMALS (1 << 3) ++# define RADEON_RESCALE_NORMALS (1 << 4) ++# define RADEON_SPECULAR_LIGHTS (1 << 5) ++# define RADEON_DIFFUSE_SPECULAR_COMBINE (1 << 6) ++# define RADEON_LIGHT_ALPHA (1 << 7) ++# define RADEON_LOCAL_LIGHT_VEC_GL (1 << 8) ++# define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9) ++# define RADEON_LM_SOURCE_STATE_PREMULT 0 ++# define RADEON_LM_SOURCE_STATE_MULT 1 ++# define RADEON_LM_SOURCE_VERTEX_DIFFUSE 2 ++# define RADEON_LM_SOURCE_VERTEX_SPECULAR 3 ++# define RADEON_EMISSIVE_SOURCE_SHIFT 16 ++# define RADEON_AMBIENT_SOURCE_SHIFT 18 ++# define 
RADEON_DIFFUSE_SOURCE_SHIFT 20 ++# define RADEON_SPECULAR_SOURCE_SHIFT 22 ++#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED 0x2220 ++#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN 0x2224 ++#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE 0x2228 ++#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA 0x222c ++#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED 0x2230 ++#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN 0x2234 ++#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE 0x2238 ++#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA 0x223c ++#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210 ++#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214 ++#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE 0x2218 ++#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c ++#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED 0x2240 ++#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN 0x2244 ++#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE 0x2248 ++#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA 0x224c ++#define RADEON_SE_TCL_MATRIX_SELECT_0 0x225c ++# define RADEON_MODELVIEW_0_SHIFT 0 ++# define RADEON_MODELVIEW_1_SHIFT 4 ++# define RADEON_MODELVIEW_2_SHIFT 8 ++# define RADEON_MODELVIEW_3_SHIFT 12 ++# define RADEON_IT_MODELVIEW_0_SHIFT 16 ++# define RADEON_IT_MODELVIEW_1_SHIFT 20 ++# define RADEON_IT_MODELVIEW_2_SHIFT 24 ++# define RADEON_IT_MODELVIEW_3_SHIFT 28 ++#define RADEON_SE_TCL_MATRIX_SELECT_1 0x2260 ++# define RADEON_MODELPROJECT_0_SHIFT 0 ++# define RADEON_MODELPROJECT_1_SHIFT 4 ++# define RADEON_MODELPROJECT_2_SHIFT 8 ++# define RADEON_MODELPROJECT_3_SHIFT 12 ++# define RADEON_TEXMAT_0_SHIFT 16 ++# define RADEON_TEXMAT_1_SHIFT 20 ++# define RADEON_TEXMAT_2_SHIFT 24 ++# define RADEON_TEXMAT_3_SHIFT 28 ++ ++ ++#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254 ++# define RADEON_TCL_VTX_W0 (1 << 0) ++# define RADEON_TCL_VTX_FP_DIFFUSE (1 << 1) ++# define RADEON_TCL_VTX_FP_ALPHA (1 << 2) ++# define RADEON_TCL_VTX_PK_DIFFUSE (1 << 3) ++# define RADEON_TCL_VTX_FP_SPEC (1 << 4) ++# define RADEON_TCL_VTX_FP_FOG (1 << 5) ++# define 
RADEON_TCL_VTX_PK_SPEC (1 << 6) ++# define RADEON_TCL_VTX_ST0 (1 << 7) ++# define RADEON_TCL_VTX_ST1 (1 << 8) ++# define RADEON_TCL_VTX_Q1 (1 << 9) ++# define RADEON_TCL_VTX_ST2 (1 << 10) ++# define RADEON_TCL_VTX_Q2 (1 << 11) ++# define RADEON_TCL_VTX_ST3 (1 << 12) ++# define RADEON_TCL_VTX_Q3 (1 << 13) ++# define RADEON_TCL_VTX_Q0 (1 << 14) ++# define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15 ++# define RADEON_TCL_VTX_NORM0 (1 << 18) ++# define RADEON_TCL_VTX_XY1 (1 << 27) ++# define RADEON_TCL_VTX_Z1 (1 << 28) ++# define RADEON_TCL_VTX_W1 (1 << 29) ++# define RADEON_TCL_VTX_NORM1 (1 << 30) ++# define RADEON_TCL_VTX_Z0 (1 << 31) ++ ++#define RADEON_SE_TCL_OUTPUT_VTX_SEL 0x2258 ++# define RADEON_TCL_COMPUTE_XYZW (1 << 0) ++# define RADEON_TCL_COMPUTE_DIFFUSE (1 << 1) ++# define RADEON_TCL_COMPUTE_SPECULAR (1 << 2) ++# define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3) ++# define RADEON_TCL_FORCE_INORDER_PROC (1 << 4) ++# define RADEON_TCL_TEX_INPUT_TEX_0 0 ++# define RADEON_TCL_TEX_INPUT_TEX_1 1 ++# define RADEON_TCL_TEX_INPUT_TEX_2 2 ++# define RADEON_TCL_TEX_INPUT_TEX_3 3 ++# define RADEON_TCL_TEX_COMPUTED_TEX_0 8 ++# define RADEON_TCL_TEX_COMPUTED_TEX_1 9 ++# define RADEON_TCL_TEX_COMPUTED_TEX_2 10 ++# define RADEON_TCL_TEX_COMPUTED_TEX_3 11 ++# define RADEON_TCL_TEX_0_OUTPUT_SHIFT 16 ++# define RADEON_TCL_TEX_1_OUTPUT_SHIFT 20 ++# define RADEON_TCL_TEX_2_OUTPUT_SHIFT 24 ++# define RADEON_TCL_TEX_3_OUTPUT_SHIFT 28 ++ ++#define RADEON_SE_TCL_PER_LIGHT_CTL_0 0x2270 ++# define RADEON_LIGHT_0_ENABLE (1 << 0) ++# define RADEON_LIGHT_0_ENABLE_AMBIENT (1 << 1) ++# define RADEON_LIGHT_0_ENABLE_SPECULAR (1 << 2) ++# define RADEON_LIGHT_0_IS_LOCAL (1 << 3) ++# define RADEON_LIGHT_0_IS_SPOT (1 << 4) ++# define RADEON_LIGHT_0_DUAL_CONE (1 << 5) ++# define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN (1 << 6) ++# define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 << 7) ++# define RADEON_LIGHT_0_SHIFT 0 ++# define RADEON_LIGHT_1_ENABLE (1 << 16) ++# define RADEON_LIGHT_1_ENABLE_AMBIENT (1 << 17) 
++# define RADEON_LIGHT_1_ENABLE_SPECULAR (1 << 18) ++# define RADEON_LIGHT_1_IS_LOCAL (1 << 19) ++# define RADEON_LIGHT_1_IS_SPOT (1 << 20) ++# define RADEON_LIGHT_1_DUAL_CONE (1 << 21) ++# define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN (1 << 22) ++# define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23) ++# define RADEON_LIGHT_1_SHIFT 16 ++#define RADEON_SE_TCL_PER_LIGHT_CTL_1 0x2274 ++# define RADEON_LIGHT_2_SHIFT 0 ++# define RADEON_LIGHT_3_SHIFT 16 ++#define RADEON_SE_TCL_PER_LIGHT_CTL_2 0x2278 ++# define RADEON_LIGHT_4_SHIFT 0 ++# define RADEON_LIGHT_5_SHIFT 16 ++#define RADEON_SE_TCL_PER_LIGHT_CTL_3 0x227c ++# define RADEON_LIGHT_6_SHIFT 0 ++# define RADEON_LIGHT_7_SHIFT 16 ++ ++#define RADEON_SE_TCL_SHININESS 0x2250 ++ ++#define RADEON_SE_TCL_TEXTURE_PROC_CTL 0x2268 ++# define RADEON_TEXGEN_TEXMAT_0_ENABLE (1 << 0) ++# define RADEON_TEXGEN_TEXMAT_1_ENABLE (1 << 1) ++# define RADEON_TEXGEN_TEXMAT_2_ENABLE (1 << 2) ++# define RADEON_TEXGEN_TEXMAT_3_ENABLE (1 << 3) ++# define RADEON_TEXMAT_0_ENABLE (1 << 4) ++# define RADEON_TEXMAT_1_ENABLE (1 << 5) ++# define RADEON_TEXMAT_2_ENABLE (1 << 6) ++# define RADEON_TEXMAT_3_ENABLE (1 << 7) ++# define RADEON_TEXGEN_INPUT_MASK 0xf ++# define RADEON_TEXGEN_INPUT_TEXCOORD_0 0 ++# define RADEON_TEXGEN_INPUT_TEXCOORD_1 1 ++# define RADEON_TEXGEN_INPUT_TEXCOORD_2 2 ++# define RADEON_TEXGEN_INPUT_TEXCOORD_3 3 ++# define RADEON_TEXGEN_INPUT_OBJ 4 ++# define RADEON_TEXGEN_INPUT_EYE 5 ++# define RADEON_TEXGEN_INPUT_EYE_NORMAL 6 ++# define RADEON_TEXGEN_INPUT_EYE_REFLECT 7 ++# define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8 ++# define RADEON_TEXGEN_0_INPUT_SHIFT 16 ++# define RADEON_TEXGEN_1_INPUT_SHIFT 20 ++# define RADEON_TEXGEN_2_INPUT_SHIFT 24 ++# define RADEON_TEXGEN_3_INPUT_SHIFT 28 ++ ++#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL 0x2264 ++# define RADEON_UCP_IN_CLIP_SPACE (1 << 0) ++# define RADEON_UCP_IN_MODEL_SPACE (1 << 1) ++# define RADEON_UCP_ENABLE_0 (1 << 2) ++# define RADEON_UCP_ENABLE_1 (1 << 3) ++# define RADEON_UCP_ENABLE_2 
(1 << 4) ++# define RADEON_UCP_ENABLE_3 (1 << 5) ++# define RADEON_UCP_ENABLE_4 (1 << 6) ++# define RADEON_UCP_ENABLE_5 (1 << 7) ++# define RADEON_TCL_FOG_MASK (3 << 8) ++# define RADEON_TCL_FOG_DISABLE (0 << 8) ++# define RADEON_TCL_FOG_EXP (1 << 8) ++# define RADEON_TCL_FOG_EXP2 (2 << 8) ++# define RADEON_TCL_FOG_LINEAR (3 << 8) ++# define RADEON_RNG_BASED_FOG (1 << 10) ++# define RADEON_LIGHT_TWOSIDE (1 << 11) ++# define RADEON_BLEND_OP_COUNT_MASK (7 << 12) ++# define RADEON_BLEND_OP_COUNT_SHIFT 12 ++# define RADEON_POSITION_BLEND_OP_ENABLE (1 << 16) ++# define RADEON_NORMAL_BLEND_OP_ENABLE (1 << 17) ++# define RADEON_VERTEX_BLEND_SRC_0_PRIMARY (1 << 18) ++# define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18) ++# define RADEON_VERTEX_BLEND_SRC_1_PRIMARY (1 << 19) ++# define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19) ++# define RADEON_VERTEX_BLEND_SRC_2_PRIMARY (1 << 20) ++# define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20) ++# define RADEON_VERTEX_BLEND_SRC_3_PRIMARY (1 << 21) ++# define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21) ++# define RADEON_VERTEX_BLEND_WGT_MINUS_ONE (1 << 22) ++# define RADEON_CULL_FRONT_IS_CW (0 << 28) ++# define RADEON_CULL_FRONT_IS_CCW (1 << 28) ++# define RADEON_CULL_FRONT (1 << 29) ++# define RADEON_CULL_BACK (1 << 30) ++# define RADEON_FORCE_W_TO_ONE (1 << 31) ++ ++#define RADEON_SE_VPORT_XSCALE 0x1d98 ++#define RADEON_SE_VPORT_XOFFSET 0x1d9c ++#define RADEON_SE_VPORT_YSCALE 0x1da0 ++#define RADEON_SE_VPORT_YOFFSET 0x1da4 ++#define RADEON_SE_VPORT_ZSCALE 0x1da8 ++#define RADEON_SE_VPORT_ZOFFSET 0x1dac ++#define RADEON_SE_ZBIAS_FACTOR 0x1db0 ++#define RADEON_SE_ZBIAS_CONSTANT 0x1db4 ++ ++#define RADEON_SE_VTX_FMT 0x2080 ++# define RADEON_SE_VTX_FMT_XY 0x00000000 ++# define RADEON_SE_VTX_FMT_W0 0x00000001 ++# define RADEON_SE_VTX_FMT_FPCOLOR 0x00000002 ++# define RADEON_SE_VTX_FMT_FPALPHA 0x00000004 ++# define RADEON_SE_VTX_FMT_PKCOLOR 0x00000008 ++# define RADEON_SE_VTX_FMT_FPSPEC 0x00000010 ++# define 
RADEON_SE_VTX_FMT_FPFOG 0x00000020 ++# define RADEON_SE_VTX_FMT_PKSPEC 0x00000040 ++# define RADEON_SE_VTX_FMT_ST0 0x00000080 ++# define RADEON_SE_VTX_FMT_ST1 0x00000100 ++# define RADEON_SE_VTX_FMT_Q1 0x00000200 ++# define RADEON_SE_VTX_FMT_ST2 0x00000400 ++# define RADEON_SE_VTX_FMT_Q2 0x00000800 ++# define RADEON_SE_VTX_FMT_ST3 0x00001000 ++# define RADEON_SE_VTX_FMT_Q3 0x00002000 ++# define RADEON_SE_VTX_FMT_Q0 0x00004000 ++# define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK 0x00038000 ++# define RADEON_SE_VTX_FMT_N0 0x00040000 ++# define RADEON_SE_VTX_FMT_XY1 0x08000000 ++# define RADEON_SE_VTX_FMT_Z1 0x10000000 ++# define RADEON_SE_VTX_FMT_W1 0x20000000 ++# define RADEON_SE_VTX_FMT_N1 0x40000000 ++# define RADEON_SE_VTX_FMT_Z 0x80000000 ++ ++#define RADEON_SE_VF_CNTL 0x2084 ++# define RADEON_VF_PRIM_TYPE_POINT_LIST 1 ++# define RADEON_VF_PRIM_TYPE_LINE_LIST 2 ++# define RADEON_VF_PRIM_TYPE_LINE_STRIP 3 ++# define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST 4 ++# define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN 5 ++# define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP 6 ++# define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG 7 ++# define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST 8 ++# define RADEON_VF_PRIM_TYPE_POINT_LIST_3 9 ++# define RADEON_VF_PRIM_TYPE_LINE_LIST_3 10 ++# define RADEON_VF_PRIM_TYPE_SPIRIT_LIST 11 ++# define RADEON_VF_PRIM_TYPE_LINE_LOOP 12 ++# define RADEON_VF_PRIM_TYPE_QUAD_LIST 13 ++# define RADEON_VF_PRIM_TYPE_QUAD_STRIP 14 ++# define RADEON_VF_PRIM_TYPE_POLYGON 15 ++# define RADEON_VF_PRIM_WALK_STATE (0<<4) ++# define RADEON_VF_PRIM_WALK_INDEX (1<<4) ++# define RADEON_VF_PRIM_WALK_LIST (2<<4) ++# define RADEON_VF_PRIM_WALK_DATA (3<<4) ++# define RADEON_VF_COLOR_ORDER_RGBA (1<<6) ++# define RADEON_VF_RADEON_MODE (1<<8) ++# define RADEON_VF_TCL_OUTPUT_CTL_ENA (1<<9) ++# define RADEON_VF_PROG_STREAM_ENA (1<<10) ++# define RADEON_VF_INDEX_SIZE_SHIFT 11 ++# define RADEON_VF_NUM_VERTICES_SHIFT 16 ++ ++#define RADEON_SE_PORT_DATA0 0x2000 ++ ++#define R200_SE_VAP_CNTL 0x2080 ++# define 
R200_VAP_TCL_ENABLE 0x00000001 ++# define R200_VAP_SINGLE_BUF_STATE_ENABLE 0x00000010 ++# define R200_VAP_FORCE_W_TO_ONE 0x00010000 ++# define R200_VAP_D3D_TEX_DEFAULT 0x00020000 ++# define R200_VAP_VF_MAX_VTX_NUM__SHIFT 18 ++# define R200_VAP_VF_MAX_VTX_NUM (9 << 18) ++# define R200_VAP_DX_CLIP_SPACE_DEF 0x00400000 ++#define R200_VF_MAX_VTX_INDX 0x210c ++#define R200_VF_MIN_VTX_INDX 0x2110 ++#define R200_SE_VTE_CNTL 0x20b0 ++# define R200_VPORT_X_SCALE_ENA 0x00000001 ++# define R200_VPORT_X_OFFSET_ENA 0x00000002 ++# define R200_VPORT_Y_SCALE_ENA 0x00000004 ++# define R200_VPORT_Y_OFFSET_ENA 0x00000008 ++# define R200_VPORT_Z_SCALE_ENA 0x00000010 ++# define R200_VPORT_Z_OFFSET_ENA 0x00000020 ++# define R200_VTX_XY_FMT 0x00000100 ++# define R200_VTX_Z_FMT 0x00000200 ++# define R200_VTX_W0_FMT 0x00000400 ++# define R200_VTX_W0_NORMALIZE 0x00000800 ++# define R200_VTX_ST_DENORMALIZED 0x00001000 ++#define R200_SE_VAP_CNTL_STATUS 0x2140 ++# define R200_VC_NO_SWAP (0 << 0) ++# define R200_VC_16BIT_SWAP (1 << 0) ++# define R200_VC_32BIT_SWAP (2 << 0) ++#define R200_PP_TXFILTER_0 0x2c00 ++#define R200_PP_TXFILTER_1 0x2c20 ++#define R200_PP_TXFILTER_2 0x2c40 ++#define R200_PP_TXFILTER_3 0x2c60 ++#define R200_PP_TXFILTER_4 0x2c80 ++#define R200_PP_TXFILTER_5 0x2ca0 ++# define R200_MAG_FILTER_NEAREST (0 << 0) ++# define R200_MAG_FILTER_LINEAR (1 << 0) ++# define R200_MAG_FILTER_MASK (1 << 0) ++# define R200_MIN_FILTER_NEAREST (0 << 1) ++# define R200_MIN_FILTER_LINEAR (1 << 1) ++# define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1) ++# define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1) ++# define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1) ++# define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1) ++# define R200_MIN_FILTER_ANISO_NEAREST (8 << 1) ++# define R200_MIN_FILTER_ANISO_LINEAR (9 << 1) ++# define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1) ++# define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1) ++# define R200_MIN_FILTER_MASK (15 << 1) ++# define 
R200_MAX_ANISO_1_TO_1 (0 << 5) ++# define R200_MAX_ANISO_2_TO_1 (1 << 5) ++# define R200_MAX_ANISO_4_TO_1 (2 << 5) ++# define R200_MAX_ANISO_8_TO_1 (3 << 5) ++# define R200_MAX_ANISO_16_TO_1 (4 << 5) ++# define R200_MAX_ANISO_MASK (7 << 5) ++# define R200_MAX_MIP_LEVEL_MASK (0x0f << 16) ++# define R200_MAX_MIP_LEVEL_SHIFT 16 ++# define R200_YUV_TO_RGB (1 << 20) ++# define R200_YUV_TEMPERATURE_COOL (0 << 21) ++# define R200_YUV_TEMPERATURE_HOT (1 << 21) ++# define R200_YUV_TEMPERATURE_MASK (1 << 21) ++# define R200_WRAPEN_S (1 << 22) ++# define R200_CLAMP_S_WRAP (0 << 23) ++# define R200_CLAMP_S_MIRROR (1 << 23) ++# define R200_CLAMP_S_CLAMP_LAST (2 << 23) ++# define R200_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23) ++# define R200_CLAMP_S_CLAMP_BORDER (4 << 23) ++# define R200_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23) ++# define R200_CLAMP_S_CLAMP_GL (6 << 23) ++# define R200_CLAMP_S_MIRROR_CLAMP_GL (7 << 23) ++# define R200_CLAMP_S_MASK (7 << 23) ++# define R200_WRAPEN_T (1 << 26) ++# define R200_CLAMP_T_WRAP (0 << 27) ++# define R200_CLAMP_T_MIRROR (1 << 27) ++# define R200_CLAMP_T_CLAMP_LAST (2 << 27) ++# define R200_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27) ++# define R200_CLAMP_T_CLAMP_BORDER (4 << 27) ++# define R200_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27) ++# define R200_CLAMP_T_CLAMP_GL (6 << 27) ++# define R200_CLAMP_T_MIRROR_CLAMP_GL (7 << 27) ++# define R200_CLAMP_T_MASK (7 << 27) ++# define R200_KILL_LT_ZERO (1 << 30) ++# define R200_BORDER_MODE_OGL (0 << 31) ++# define R200_BORDER_MODE_D3D (1 << 31) ++#define R200_PP_TXFORMAT_0 0x2c04 ++#define R200_PP_TXFORMAT_1 0x2c24 ++#define R200_PP_TXFORMAT_2 0x2c44 ++#define R200_PP_TXFORMAT_3 0x2c64 ++#define R200_PP_TXFORMAT_4 0x2c84 ++#define R200_PP_TXFORMAT_5 0x2ca4 ++# define R200_TXFORMAT_I8 (0 << 0) ++# define R200_TXFORMAT_AI88 (1 << 0) ++# define R200_TXFORMAT_RGB332 (2 << 0) ++# define R200_TXFORMAT_ARGB1555 (3 << 0) ++# define R200_TXFORMAT_RGB565 (4 << 0) ++# define R200_TXFORMAT_ARGB4444 (5 << 0) ++# define 
R200_TXFORMAT_ARGB8888 (6 << 0) ++# define R200_TXFORMAT_RGBA8888 (7 << 0) ++# define R200_TXFORMAT_Y8 (8 << 0) ++# define R200_TXFORMAT_AVYU4444 (9 << 0) ++# define R200_TXFORMAT_VYUY422 (10 << 0) ++# define R200_TXFORMAT_YVYU422 (11 << 0) ++# define R200_TXFORMAT_DXT1 (12 << 0) ++# define R200_TXFORMAT_DXT23 (14 << 0) ++# define R200_TXFORMAT_DXT45 (15 << 0) ++# define R200_TXFORMAT_ABGR8888 (22 << 0) ++# define R200_TXFORMAT_FORMAT_MASK (31 << 0) ++# define R200_TXFORMAT_FORMAT_SHIFT 0 ++# define R200_TXFORMAT_ALPHA_IN_MAP (1 << 6) ++# define R200_TXFORMAT_NON_POWER2 (1 << 7) ++# define R200_TXFORMAT_WIDTH_MASK (15 << 8) ++# define R200_TXFORMAT_WIDTH_SHIFT 8 ++# define R200_TXFORMAT_HEIGHT_MASK (15 << 12) ++# define R200_TXFORMAT_HEIGHT_SHIFT 12 ++# define R200_TXFORMAT_F5_WIDTH_MASK (15 << 16) /* cube face 5 */ ++# define R200_TXFORMAT_F5_WIDTH_SHIFT 16 ++# define R200_TXFORMAT_F5_HEIGHT_MASK (15 << 20) ++# define R200_TXFORMAT_F5_HEIGHT_SHIFT 20 ++# define R200_TXFORMAT_ST_ROUTE_STQ0 (0 << 24) ++# define R200_TXFORMAT_ST_ROUTE_STQ1 (1 << 24) ++# define R200_TXFORMAT_ST_ROUTE_STQ2 (2 << 24) ++# define R200_TXFORMAT_ST_ROUTE_STQ3 (3 << 24) ++# define R200_TXFORMAT_ST_ROUTE_STQ4 (4 << 24) ++# define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24) ++# define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24) ++# define R200_TXFORMAT_ST_ROUTE_SHIFT 24 ++# define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28) ++# define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29) ++# define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30) ++#define R200_PP_TXFORMAT_X_0 0x2c08 ++#define R200_PP_TXFORMAT_X_1 0x2c28 ++#define R200_PP_TXFORMAT_X_2 0x2c48 ++#define R200_PP_TXFORMAT_X_3 0x2c68 ++#define R200_PP_TXFORMAT_X_4 0x2c88 ++#define R200_PP_TXFORMAT_X_5 0x2ca8 ++ ++#define R200_PP_TXSIZE_0 0x2c0c /* NPOT only */ ++#define R200_PP_TXSIZE_1 0x2c2c /* NPOT only */ ++#define R200_PP_TXSIZE_2 0x2c4c /* NPOT only */ ++#define R200_PP_TXSIZE_3 0x2c6c /* NPOT only */ ++#define R200_PP_TXSIZE_4 0x2c8c /* NPOT only */ 
++#define R200_PP_TXSIZE_5 0x2cac /* NPOT only */ ++ ++#define R200_PP_TXPITCH_0 0x2c10 /* NPOT only */ ++#define R200_PP_TXPITCH_1 0x2c30 /* NPOT only */ ++#define R200_PP_TXPITCH_2 0x2c50 /* NPOT only */ ++#define R200_PP_TXPITCH_3 0x2c70 /* NPOT only */ ++#define R200_PP_TXPITCH_4 0x2c90 /* NPOT only */ ++#define R200_PP_TXPITCH_5 0x2cb0 /* NPOT only */ ++ ++#define R200_PP_TXOFFSET_0 0x2d00 ++# define R200_TXO_ENDIAN_NO_SWAP (0 << 0) ++# define R200_TXO_ENDIAN_BYTE_SWAP (1 << 0) ++# define R200_TXO_ENDIAN_WORD_SWAP (2 << 0) ++# define R200_TXO_ENDIAN_HALFDW_SWAP (3 << 0) ++# define R200_TXO_MACRO_LINEAR (0 << 2) ++# define R200_TXO_MACRO_TILE (1 << 2) ++# define R200_TXO_MICRO_LINEAR (0 << 3) ++# define R200_TXO_MICRO_TILE (1 << 3) ++# define R200_TXO_OFFSET_MASK 0xffffffe0 ++# define R200_TXO_OFFSET_SHIFT 5 ++#define R200_PP_TXOFFSET_1 0x2d18 ++#define R200_PP_TXOFFSET_2 0x2d30 ++#define R200_PP_TXOFFSET_3 0x2d48 ++#define R200_PP_TXOFFSET_4 0x2d60 ++#define R200_PP_TXOFFSET_5 0x2d78 ++ ++#define R200_PP_TFACTOR_0 0x2ee0 ++#define R200_PP_TFACTOR_1 0x2ee4 ++#define R200_PP_TFACTOR_2 0x2ee8 ++#define R200_PP_TFACTOR_3 0x2eec ++#define R200_PP_TFACTOR_4 0x2ef0 ++#define R200_PP_TFACTOR_5 0x2ef4 ++ ++#define R200_PP_TXCBLEND_0 0x2f00 ++# define R200_TXC_ARG_A_ZERO (0) ++# define R200_TXC_ARG_A_CURRENT_COLOR (2) ++# define R200_TXC_ARG_A_CURRENT_ALPHA (3) ++# define R200_TXC_ARG_A_DIFFUSE_COLOR (4) ++# define R200_TXC_ARG_A_DIFFUSE_ALPHA (5) ++# define R200_TXC_ARG_A_SPECULAR_COLOR (6) ++# define R200_TXC_ARG_A_SPECULAR_ALPHA (7) ++# define R200_TXC_ARG_A_TFACTOR_COLOR (8) ++# define R200_TXC_ARG_A_TFACTOR_ALPHA (9) ++# define R200_TXC_ARG_A_R0_COLOR (10) ++# define R200_TXC_ARG_A_R0_ALPHA (11) ++# define R200_TXC_ARG_A_R1_COLOR (12) ++# define R200_TXC_ARG_A_R1_ALPHA (13) ++# define R200_TXC_ARG_A_R2_COLOR (14) ++# define R200_TXC_ARG_A_R2_ALPHA (15) ++# define R200_TXC_ARG_A_R3_COLOR (16) ++# define R200_TXC_ARG_A_R3_ALPHA (17) ++# define R200_TXC_ARG_A_R4_COLOR 
(18) ++# define R200_TXC_ARG_A_R4_ALPHA (19) ++# define R200_TXC_ARG_A_R5_COLOR (20) ++# define R200_TXC_ARG_A_R5_ALPHA (21) ++# define R200_TXC_ARG_A_TFACTOR1_COLOR (26) ++# define R200_TXC_ARG_A_TFACTOR1_ALPHA (27) ++# define R200_TXC_ARG_A_MASK (31 << 0) ++# define R200_TXC_ARG_A_SHIFT 0 ++# define R200_TXC_ARG_B_ZERO (0 << 5) ++# define R200_TXC_ARG_B_CURRENT_COLOR (2 << 5) ++# define R200_TXC_ARG_B_CURRENT_ALPHA (3 << 5) ++# define R200_TXC_ARG_B_DIFFUSE_COLOR (4 << 5) ++# define R200_TXC_ARG_B_DIFFUSE_ALPHA (5 << 5) ++# define R200_TXC_ARG_B_SPECULAR_COLOR (6 << 5) ++# define R200_TXC_ARG_B_SPECULAR_ALPHA (7 << 5) ++# define R200_TXC_ARG_B_TFACTOR_COLOR (8 << 5) ++# define R200_TXC_ARG_B_TFACTOR_ALPHA (9 << 5) ++# define R200_TXC_ARG_B_R0_COLOR (10 << 5) ++# define R200_TXC_ARG_B_R0_ALPHA (11 << 5) ++# define R200_TXC_ARG_B_R1_COLOR (12 << 5) ++# define R200_TXC_ARG_B_R1_ALPHA (13 << 5) ++# define R200_TXC_ARG_B_R2_COLOR (14 << 5) ++# define R200_TXC_ARG_B_R2_ALPHA (15 << 5) ++# define R200_TXC_ARG_B_R3_COLOR (16 << 5) ++# define R200_TXC_ARG_B_R3_ALPHA (17 << 5) ++# define R200_TXC_ARG_B_R4_COLOR (18 << 5) ++# define R200_TXC_ARG_B_R4_ALPHA (19 << 5) ++# define R200_TXC_ARG_B_R5_COLOR (20 << 5) ++# define R200_TXC_ARG_B_R5_ALPHA (21 << 5) ++# define R200_TXC_ARG_B_TFACTOR1_COLOR (26 << 5) ++# define R200_TXC_ARG_B_TFACTOR1_ALPHA (27 << 5) ++# define R200_TXC_ARG_B_MASK (31 << 5) ++# define R200_TXC_ARG_B_SHIFT 5 ++# define R200_TXC_ARG_C_ZERO (0 << 10) ++# define R200_TXC_ARG_C_CURRENT_COLOR (2 << 10) ++# define R200_TXC_ARG_C_CURRENT_ALPHA (3 << 10) ++# define R200_TXC_ARG_C_DIFFUSE_COLOR (4 << 10) ++# define R200_TXC_ARG_C_DIFFUSE_ALPHA (5 << 10) ++# define R200_TXC_ARG_C_SPECULAR_COLOR (6 << 10) ++# define R200_TXC_ARG_C_SPECULAR_ALPHA (7 << 10) ++# define R200_TXC_ARG_C_TFACTOR_COLOR (8 << 10) ++# define R200_TXC_ARG_C_TFACTOR_ALPHA (9 << 10) ++# define R200_TXC_ARG_C_R0_COLOR (10 << 10) ++# define R200_TXC_ARG_C_R0_ALPHA (11 << 10) ++# define 
R200_TXC_ARG_C_R1_COLOR (12 << 10) ++# define R200_TXC_ARG_C_R1_ALPHA (13 << 10) ++# define R200_TXC_ARG_C_R2_COLOR (14 << 10) ++# define R200_TXC_ARG_C_R2_ALPHA (15 << 10) ++# define R200_TXC_ARG_C_R3_COLOR (16 << 10) ++# define R200_TXC_ARG_C_R3_ALPHA (17 << 10) ++# define R200_TXC_ARG_C_R4_COLOR (18 << 10) ++# define R200_TXC_ARG_C_R4_ALPHA (19 << 10) ++# define R200_TXC_ARG_C_R5_COLOR (20 << 10) ++# define R200_TXC_ARG_C_R5_ALPHA (21 << 10) ++# define R200_TXC_ARG_C_TFACTOR1_COLOR (26 << 10) ++# define R200_TXC_ARG_C_TFACTOR1_ALPHA (27 << 10) ++# define R200_TXC_ARG_C_MASK (31 << 10) ++# define R200_TXC_ARG_C_SHIFT 10 ++# define R200_TXC_COMP_ARG_A (1 << 16) ++# define R200_TXC_COMP_ARG_A_SHIFT (16) ++# define R200_TXC_BIAS_ARG_A (1 << 17) ++# define R200_TXC_SCALE_ARG_A (1 << 18) ++# define R200_TXC_NEG_ARG_A (1 << 19) ++# define R200_TXC_COMP_ARG_B (1 << 20) ++# define R200_TXC_COMP_ARG_B_SHIFT (20) ++# define R200_TXC_BIAS_ARG_B (1 << 21) ++# define R200_TXC_SCALE_ARG_B (1 << 22) ++# define R200_TXC_NEG_ARG_B (1 << 23) ++# define R200_TXC_COMP_ARG_C (1 << 24) ++# define R200_TXC_COMP_ARG_C_SHIFT (24) ++# define R200_TXC_BIAS_ARG_C (1 << 25) ++# define R200_TXC_SCALE_ARG_C (1 << 26) ++# define R200_TXC_NEG_ARG_C (1 << 27) ++# define R200_TXC_OP_MADD (0 << 28) ++# define R200_TXC_OP_CND0 (2 << 28) ++# define R200_TXC_OP_LERP (3 << 28) ++# define R200_TXC_OP_DOT3 (4 << 28) ++# define R200_TXC_OP_DOT4 (5 << 28) ++# define R200_TXC_OP_CONDITIONAL (6 << 28) ++# define R200_TXC_OP_DOT2_ADD (7 << 28) ++# define R200_TXC_OP_MASK (7 << 28) ++#define R200_PP_TXCBLEND2_0 0x2f04 ++# define R200_TXC_TFACTOR_SEL_SHIFT 0 ++# define R200_TXC_TFACTOR_SEL_MASK 0x7 ++# define R200_TXC_TFACTOR1_SEL_SHIFT 4 ++# define R200_TXC_TFACTOR1_SEL_MASK (0x7 << 4) ++# define R200_TXC_SCALE_SHIFT 8 ++# define R200_TXC_SCALE_MASK (7 << 8) ++# define R200_TXC_SCALE_1X (0 << 8) ++# define R200_TXC_SCALE_2X (1 << 8) ++# define R200_TXC_SCALE_4X (2 << 8) ++# define R200_TXC_SCALE_8X (3 << 8) 
++# define R200_TXC_SCALE_INV2 (5 << 8) ++# define R200_TXC_SCALE_INV4 (6 << 8) ++# define R200_TXC_SCALE_INV8 (7 << 8) ++# define R200_TXC_CLAMP_SHIFT 12 ++# define R200_TXC_CLAMP_MASK (3 << 12) ++# define R200_TXC_CLAMP_WRAP (0 << 12) ++# define R200_TXC_CLAMP_0_1 (1 << 12) ++# define R200_TXC_CLAMP_8_8 (2 << 12) ++# define R200_TXC_OUTPUT_REG_MASK (7 << 16) ++# define R200_TXC_OUTPUT_REG_NONE (0 << 16) ++# define R200_TXC_OUTPUT_REG_R0 (1 << 16) ++# define R200_TXC_OUTPUT_REG_R1 (2 << 16) ++# define R200_TXC_OUTPUT_REG_R2 (3 << 16) ++# define R200_TXC_OUTPUT_REG_R3 (4 << 16) ++# define R200_TXC_OUTPUT_REG_R4 (5 << 16) ++# define R200_TXC_OUTPUT_REG_R5 (6 << 16) ++# define R200_TXC_OUTPUT_MASK_MASK (7 << 20) ++# define R200_TXC_OUTPUT_MASK_RGB (0 << 20) ++# define R200_TXC_OUTPUT_MASK_RG (1 << 20) ++# define R200_TXC_OUTPUT_MASK_RB (2 << 20) ++# define R200_TXC_OUTPUT_MASK_R (3 << 20) ++# define R200_TXC_OUTPUT_MASK_GB (4 << 20) ++# define R200_TXC_OUTPUT_MASK_G (5 << 20) ++# define R200_TXC_OUTPUT_MASK_B (6 << 20) ++# define R200_TXC_OUTPUT_MASK_NONE (7 << 20) ++# define R200_TXC_REPL_NORMAL 0 ++# define R200_TXC_REPL_RED 1 ++# define R200_TXC_REPL_GREEN 2 ++# define R200_TXC_REPL_BLUE 3 ++# define R200_TXC_REPL_ARG_A_SHIFT 26 ++# define R200_TXC_REPL_ARG_A_MASK (3 << 26) ++# define R200_TXC_REPL_ARG_B_SHIFT 28 ++# define R200_TXC_REPL_ARG_B_MASK (3 << 28) ++# define R200_TXC_REPL_ARG_C_SHIFT 30 ++# define R200_TXC_REPL_ARG_C_MASK (3 << 30) ++#define R200_PP_TXABLEND_0 0x2f08 ++# define R200_TXA_ARG_A_ZERO (0) ++# define R200_TXA_ARG_A_CURRENT_ALPHA (2) /* guess */ ++# define R200_TXA_ARG_A_CURRENT_BLUE (3) /* guess */ ++# define R200_TXA_ARG_A_DIFFUSE_ALPHA (4) ++# define R200_TXA_ARG_A_DIFFUSE_BLUE (5) ++# define R200_TXA_ARG_A_SPECULAR_ALPHA (6) ++# define R200_TXA_ARG_A_SPECULAR_BLUE (7) ++# define R200_TXA_ARG_A_TFACTOR_ALPHA (8) ++# define R200_TXA_ARG_A_TFACTOR_BLUE (9) ++# define R200_TXA_ARG_A_R0_ALPHA (10) ++# define R200_TXA_ARG_A_R0_BLUE (11) ++# 
define R200_TXA_ARG_A_R1_ALPHA (12) ++# define R200_TXA_ARG_A_R1_BLUE (13) ++# define R200_TXA_ARG_A_R2_ALPHA (14) ++# define R200_TXA_ARG_A_R2_BLUE (15) ++# define R200_TXA_ARG_A_R3_ALPHA (16) ++# define R200_TXA_ARG_A_R3_BLUE (17) ++# define R200_TXA_ARG_A_R4_ALPHA (18) ++# define R200_TXA_ARG_A_R4_BLUE (19) ++# define R200_TXA_ARG_A_R5_ALPHA (20) ++# define R200_TXA_ARG_A_R5_BLUE (21) ++# define R200_TXA_ARG_A_TFACTOR1_ALPHA (26) ++# define R200_TXA_ARG_A_TFACTOR1_BLUE (27) ++# define R200_TXA_ARG_A_MASK (31 << 0) ++# define R200_TXA_ARG_A_SHIFT 0 ++# define R200_TXA_ARG_B_ZERO (0 << 5) ++# define R200_TXA_ARG_B_CURRENT_ALPHA (2 << 5) /* guess */ ++# define R200_TXA_ARG_B_CURRENT_BLUE (3 << 5) /* guess */ ++# define R200_TXA_ARG_B_DIFFUSE_ALPHA (4 << 5) ++# define R200_TXA_ARG_B_DIFFUSE_BLUE (5 << 5) ++# define R200_TXA_ARG_B_SPECULAR_ALPHA (6 << 5) ++# define R200_TXA_ARG_B_SPECULAR_BLUE (7 << 5) ++# define R200_TXA_ARG_B_TFACTOR_ALPHA (8 << 5) ++# define R200_TXA_ARG_B_TFACTOR_BLUE (9 << 5) ++# define R200_TXA_ARG_B_R0_ALPHA (10 << 5) ++# define R200_TXA_ARG_B_R0_BLUE (11 << 5) ++# define R200_TXA_ARG_B_R1_ALPHA (12 << 5) ++# define R200_TXA_ARG_B_R1_BLUE (13 << 5) ++# define R200_TXA_ARG_B_R2_ALPHA (14 << 5) ++# define R200_TXA_ARG_B_R2_BLUE (15 << 5) ++# define R200_TXA_ARG_B_R3_ALPHA (16 << 5) ++# define R200_TXA_ARG_B_R3_BLUE (17 << 5) ++# define R200_TXA_ARG_B_R4_ALPHA (18 << 5) ++# define R200_TXA_ARG_B_R4_BLUE (19 << 5) ++# define R200_TXA_ARG_B_R5_ALPHA (20 << 5) ++# define R200_TXA_ARG_B_R5_BLUE (21 << 5) ++# define R200_TXA_ARG_B_TFACTOR1_ALPHA (26 << 5) ++# define R200_TXA_ARG_B_TFACTOR1_BLUE (27 << 5) ++# define R200_TXA_ARG_B_MASK (31 << 5) ++# define R200_TXA_ARG_B_SHIFT 5 ++# define R200_TXA_ARG_C_ZERO (0 << 10) ++# define R200_TXA_ARG_C_CURRENT_ALPHA (2 << 10) /* guess */ ++# define R200_TXA_ARG_C_CURRENT_BLUE (3 << 10) /* guess */ ++# define R200_TXA_ARG_C_DIFFUSE_ALPHA (4 << 10) ++# define R200_TXA_ARG_C_DIFFUSE_BLUE (5 << 10) ++# define 
R200_TXA_ARG_C_SPECULAR_ALPHA (6 << 10) ++# define R200_TXA_ARG_C_SPECULAR_BLUE (7 << 10) ++# define R200_TXA_ARG_C_TFACTOR_ALPHA (8 << 10) ++# define R200_TXA_ARG_C_TFACTOR_BLUE (9 << 10) ++# define R200_TXA_ARG_C_R0_ALPHA (10 << 10) ++# define R200_TXA_ARG_C_R0_BLUE (11 << 10) ++# define R200_TXA_ARG_C_R1_ALPHA (12 << 10) ++# define R200_TXA_ARG_C_R1_BLUE (13 << 10) ++# define R200_TXA_ARG_C_R2_ALPHA (14 << 10) ++# define R200_TXA_ARG_C_R2_BLUE (15 << 10) ++# define R200_TXA_ARG_C_R3_ALPHA (16 << 10) ++# define R200_TXA_ARG_C_R3_BLUE (17 << 10) ++# define R200_TXA_ARG_C_R4_ALPHA (18 << 10) ++# define R200_TXA_ARG_C_R4_BLUE (19 << 10) ++# define R200_TXA_ARG_C_R5_ALPHA (20 << 10) ++# define R200_TXA_ARG_C_R5_BLUE (21 << 10) ++# define R200_TXA_ARG_C_TFACTOR1_ALPHA (26 << 10) ++# define R200_TXA_ARG_C_TFACTOR1_BLUE (27 << 10) ++# define R200_TXA_ARG_C_MASK (31 << 10) ++# define R200_TXA_ARG_C_SHIFT 10 ++# define R200_TXA_COMP_ARG_A (1 << 16) ++# define R200_TXA_COMP_ARG_A_SHIFT (16) ++# define R200_TXA_BIAS_ARG_A (1 << 17) ++# define R200_TXA_SCALE_ARG_A (1 << 18) ++# define R200_TXA_NEG_ARG_A (1 << 19) ++# define R200_TXA_COMP_ARG_B (1 << 20) ++# define R200_TXA_COMP_ARG_B_SHIFT (20) ++# define R200_TXA_BIAS_ARG_B (1 << 21) ++# define R200_TXA_SCALE_ARG_B (1 << 22) ++# define R200_TXA_NEG_ARG_B (1 << 23) ++# define R200_TXA_COMP_ARG_C (1 << 24) ++# define R200_TXA_COMP_ARG_C_SHIFT (24) ++# define R200_TXA_BIAS_ARG_C (1 << 25) ++# define R200_TXA_SCALE_ARG_C (1 << 26) ++# define R200_TXA_NEG_ARG_C (1 << 27) ++# define R200_TXA_OP_MADD (0 << 28) ++# define R200_TXA_OP_CND0 (2 << 28) ++# define R200_TXA_OP_LERP (3 << 28) ++# define R200_TXA_OP_CONDITIONAL (6 << 28) ++# define R200_TXA_OP_MASK (7 << 28) ++#define R200_PP_TXABLEND2_0 0x2f0c ++# define R200_TXA_TFACTOR_SEL_SHIFT 0 ++# define R200_TXA_TFACTOR_SEL_MASK 0x7 ++# define R200_TXA_TFACTOR1_SEL_SHIFT 4 ++# define R200_TXA_TFACTOR1_SEL_MASK (0x7 << 4) ++# define R200_TXA_SCALE_SHIFT 8 ++# define 
R200_TXA_SCALE_MASK (7 << 8) ++# define R200_TXA_SCALE_1X (0 << 8) ++# define R200_TXA_SCALE_2X (1 << 8) ++# define R200_TXA_SCALE_4X (2 << 8) ++# define R200_TXA_SCALE_8X (3 << 8) ++# define R200_TXA_SCALE_INV2 (5 << 8) ++# define R200_TXA_SCALE_INV4 (6 << 8) ++# define R200_TXA_SCALE_INV8 (7 << 8) ++# define R200_TXA_CLAMP_SHIFT 12 ++# define R200_TXA_CLAMP_MASK (3 << 12) ++# define R200_TXA_CLAMP_WRAP (0 << 12) ++# define R200_TXA_CLAMP_0_1 (1 << 12) ++# define R200_TXA_CLAMP_8_8 (2 << 12) ++# define R200_TXA_OUTPUT_REG_MASK (7 << 16) ++# define R200_TXA_OUTPUT_REG_NONE (0 << 16) ++# define R200_TXA_OUTPUT_REG_R0 (1 << 16) ++# define R200_TXA_OUTPUT_REG_R1 (2 << 16) ++# define R200_TXA_OUTPUT_REG_R2 (3 << 16) ++# define R200_TXA_OUTPUT_REG_R3 (4 << 16) ++# define R200_TXA_OUTPUT_REG_R4 (5 << 16) ++# define R200_TXA_OUTPUT_REG_R5 (6 << 16) ++# define R200_TXA_DOT_ALPHA (1 << 20) ++# define R200_TXA_REPL_NORMAL 0 ++# define R200_TXA_REPL_RED 1 ++# define R200_TXA_REPL_GREEN 2 ++# define R200_TXA_REPL_ARG_A_SHIFT 26 ++# define R200_TXA_REPL_ARG_A_MASK (3 << 26) ++# define R200_TXA_REPL_ARG_B_SHIFT 28 ++# define R200_TXA_REPL_ARG_B_MASK (3 << 28) ++# define R200_TXA_REPL_ARG_C_SHIFT 30 ++# define R200_TXA_REPL_ARG_C_MASK (3 << 30) ++ ++#define R200_SE_VTX_FMT_0 0x2088 ++# define R200_VTX_XY 0 /* always have xy */ ++# define R200_VTX_Z0 (1<<0) ++# define R200_VTX_W0 (1<<1) ++# define R200_VTX_WEIGHT_COUNT_SHIFT (2) ++# define R200_VTX_PV_MATRIX_SEL (1<<5) ++# define R200_VTX_N0 (1<<6) ++# define R200_VTX_POINT_SIZE (1<<7) ++# define R200_VTX_DISCRETE_FOG (1<<8) ++# define R200_VTX_SHININESS_0 (1<<9) ++# define R200_VTX_SHININESS_1 (1<<10) ++# define R200_VTX_COLOR_NOT_PRESENT 0 ++# define R200_VTX_PK_RGBA 1 ++# define R200_VTX_FP_RGB 2 ++# define R200_VTX_FP_RGBA 3 ++# define R200_VTX_COLOR_MASK 3 ++# define R200_VTX_COLOR_0_SHIFT 11 ++# define R200_VTX_COLOR_1_SHIFT 13 ++# define R200_VTX_COLOR_2_SHIFT 15 ++# define R200_VTX_COLOR_3_SHIFT 17 ++# define 
R200_VTX_COLOR_4_SHIFT 19 ++# define R200_VTX_COLOR_5_SHIFT 21 ++# define R200_VTX_COLOR_6_SHIFT 23 ++# define R200_VTX_COLOR_7_SHIFT 25 ++# define R200_VTX_XY1 (1<<28) ++# define R200_VTX_Z1 (1<<29) ++# define R200_VTX_W1 (1<<30) ++# define R200_VTX_N1 (1<<31) ++#define R200_SE_VTX_FMT_1 0x208c ++# define R200_VTX_TEX0_COMP_CNT_SHIFT 0 ++# define R200_VTX_TEX1_COMP_CNT_SHIFT 3 ++# define R200_VTX_TEX2_COMP_CNT_SHIFT 6 ++# define R200_VTX_TEX3_COMP_CNT_SHIFT 9 ++# define R200_VTX_TEX4_COMP_CNT_SHIFT 12 ++# define R200_VTX_TEX5_COMP_CNT_SHIFT 15 ++ ++#define R200_SE_TCL_OUTPUT_VTX_FMT_0 0x2090 ++#define R200_SE_TCL_OUTPUT_VTX_FMT_1 0x2094 ++#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250 ++# define R200_OUTPUT_XYZW (1<<0) ++# define R200_OUTPUT_COLOR_0 (1<<8) ++# define R200_OUTPUT_COLOR_1 (1<<9) ++# define R200_OUTPUT_TEX_0 (1<<16) ++# define R200_OUTPUT_TEX_1 (1<<17) ++# define R200_OUTPUT_TEX_2 (1<<18) ++# define R200_OUTPUT_TEX_3 (1<<19) ++# define R200_OUTPUT_TEX_4 (1<<20) ++# define R200_OUTPUT_TEX_5 (1<<21) ++# define R200_OUTPUT_TEX_MASK (0x3f<<16) ++# define R200_OUTPUT_DISCRETE_FOG (1<<24) ++# define R200_OUTPUT_PT_SIZE (1<<25) ++# define R200_FORCE_INORDER_PROC (1<<31) ++#define R200_PP_CNTL_X 0x2cc4 ++#define R200_PP_TXMULTI_CTL_0 0x2c1c ++#define R200_SE_VTX_STATE_CNTL 0x2180 ++# define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16) ++ ++ /* Registers for CP and Microcode Engine */ ++#define RADEON_CP_ME_RAM_ADDR 0x07d4 ++#define RADEON_CP_ME_RAM_RADDR 0x07d8 ++#define RADEON_CP_ME_RAM_DATAH 0x07dc ++#define RADEON_CP_ME_RAM_DATAL 0x07e0 ++ ++#define RADEON_CP_RB_BASE 0x0700 ++#define RADEON_CP_RB_CNTL 0x0704 ++#define RADEON_CP_RB_RPTR_ADDR 0x070c ++#define RADEON_CP_RB_RPTR 0x0710 ++#define RADEON_CP_RB_WPTR 0x0714 ++ ++#define RADEON_CP_IB_BASE 0x0738 ++#define RADEON_CP_IB_BUFSZ 0x073c ++ ++#define RADEON_CP_CSQ_CNTL 0x0740 ++# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0) ++# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28) ++# define RADEON_CSQ_PRIPIO_INDDIS 
(1 << 28) ++# define RADEON_CSQ_PRIBM_INDDIS (2 << 28) ++# define RADEON_CSQ_PRIPIO_INDBM (3 << 28) ++# define RADEON_CSQ_PRIBM_INDBM (4 << 28) ++# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28) ++ ++#define R300_CP_RESYNC_ADDR 0x778 ++#define R300_CP_RESYNC_DATA 0x77c ++ ++#define RADEON_CP_CSQ_STAT 0x07f8 ++# define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0) ++# define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8) ++# define RADEON_CSQ_RPTR_INDIRECT_MASK (0xff << 16) ++# define RADEON_CSQ_WPTR_INDIRECT_MASK (0xff << 24) ++#define RADEON_CP_CSQ_ADDR 0x07f0 ++#define RADEON_CP_CSQ_DATA 0x07f4 ++#define RADEON_CP_CSQ_APER_PRIMARY 0x1000 ++#define RADEON_CP_CSQ_APER_INDIRECT 0x1300 ++ ++#define RADEON_CP_RB_WPTR_DELAY 0x0718 ++# define RADEON_PRE_WRITE_TIMER_SHIFT 0 ++# define RADEON_PRE_WRITE_LIMIT_SHIFT 23 ++ ++#define RADEON_AIC_CNTL 0x01d0 ++# define RADEON_PCIGART_TRANSLATE_EN (1 << 0) ++#define RADEON_AIC_LO_ADDR 0x01dc ++ ++ ++ ++ /* Constants */ ++//#define RADEON_LAST_FRAME_REG RADEON_GUI_SCRATCH_REG0 ++//efine RADEON_LAST_CLEAR_REG RADEON_GUI_SCRATCH_REG2 ++ ++ ++ ++ /* CP packet types */ ++#define RADEON_CP_PACKET0 0x00000000 ++#define RADEON_CP_PACKET1 0x40000000 ++#define RADEON_CP_PACKET2 0x80000000 ++#define RADEON_CP_PACKET3 0xC0000000 ++# define RADEON_CP_PACKET_MASK 0xC0000000 ++# define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 ++# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12) ++# define RADEON_CP_PACKET0_REG_MASK 0x000007ff ++# define R300_CP_PACKET0_REG_MASK 0x00001fff ++# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff ++# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 ++ ++#define RADEON_CP_PACKET0_ONE_REG_WR 0x00008000 ++ ++#define RADEON_CP_PACKET3_NOP 0xC0001000 ++#define RADEON_CP_PACKET3_NEXT_CHAR 0xC0001900 ++#define RADEON_CP_PACKET3_PLY_NEXTSCAN 0xC0001D00 ++#define RADEON_CP_PACKET3_SET_SCISSORS 0xC0001E00 ++#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM 0xC0002300 ++#define RADEON_CP_PACKET3_LOAD_MICROCODE 0xC0002400 ++#define 
RADEON_CP_PACKET3_WAIT_FOR_IDLE 0xC0002600 ++#define RADEON_CP_PACKET3_3D_DRAW_VBUF 0xC0002800 ++#define RADEON_CP_PACKET3_3D_DRAW_IMMD 0xC0002900 ++#define RADEON_CP_PACKET3_3D_DRAW_INDX 0xC0002A00 ++#define RADEON_CP_PACKET3_LOAD_PALETTE 0xC0002C00 ++#define R200_CP_PACKET3_3D_DRAW_IMMD_2 0xc0003500 ++#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR 0xC0002F00 ++#define RADEON_CP_PACKET3_CNTL_PAINT 0xC0009100 ++#define RADEON_CP_PACKET3_CNTL_BITBLT 0xC0009200 ++#define RADEON_CP_PACKET3_CNTL_SMALLTEXT 0xC0009300 ++#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT 0xC0009400 ++#define RADEON_CP_PACKET3_CNTL_POLYLINE 0xC0009500 ++#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES 0xC0009800 ++#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI 0xC0009A00 ++#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI 0xC0009B00 ++#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT 0xC0009C00 ++ ++ ++#define RADEON_CP_VC_FRMT_XY 0x00000000 ++#define RADEON_CP_VC_FRMT_W0 0x00000001 ++#define RADEON_CP_VC_FRMT_FPCOLOR 0x00000002 ++#define RADEON_CP_VC_FRMT_FPALPHA 0x00000004 ++#define RADEON_CP_VC_FRMT_PKCOLOR 0x00000008 ++#define RADEON_CP_VC_FRMT_FPSPEC 0x00000010 ++#define RADEON_CP_VC_FRMT_FPFOG 0x00000020 ++#define RADEON_CP_VC_FRMT_PKSPEC 0x00000040 ++#define RADEON_CP_VC_FRMT_ST0 0x00000080 ++#define RADEON_CP_VC_FRMT_ST1 0x00000100 ++#define RADEON_CP_VC_FRMT_Q1 0x00000200 ++#define RADEON_CP_VC_FRMT_ST2 0x00000400 ++#define RADEON_CP_VC_FRMT_Q2 0x00000800 ++#define RADEON_CP_VC_FRMT_ST3 0x00001000 ++#define RADEON_CP_VC_FRMT_Q3 0x00002000 ++#define RADEON_CP_VC_FRMT_Q0 0x00004000 ++#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK 0x00038000 ++#define RADEON_CP_VC_FRMT_N0 0x00040000 ++#define RADEON_CP_VC_FRMT_XY1 0x08000000 ++#define RADEON_CP_VC_FRMT_Z1 0x10000000 ++#define RADEON_CP_VC_FRMT_W1 0x20000000 ++#define RADEON_CP_VC_FRMT_N1 0x40000000 ++#define RADEON_CP_VC_FRMT_Z 0x80000000 ++ ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE 0x00000000 ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT 0x00000001 ++#define 
RADEON_CP_VC_CNTL_PRIM_TYPE_LINE 0x00000002 ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP 0x00000003 ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004 ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005 ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006 ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2 0x00000007 ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST 0x00000008 ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009 ++#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST 0x0000000a ++#define RADEON_CP_VC_CNTL_PRIM_WALK_IND 0x00000010 ++#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST 0x00000020 ++#define RADEON_CP_VC_CNTL_PRIM_WALK_RING 0x00000030 ++#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA 0x00000000 ++#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA 0x00000040 ++#define RADEON_CP_VC_CNTL_MAOS_ENABLE 0x00000080 ++#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE 0x00000000 ++#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE 0x00000100 ++#define RADEON_CP_VC_CNTL_TCL_DISABLE 0x00000000 ++#define RADEON_CP_VC_CNTL_TCL_ENABLE 0x00000200 ++#define RADEON_CP_VC_CNTL_NUM_SHIFT 16 ++ ++#define RADEON_VS_MATRIX_0_ADDR 0 ++#define RADEON_VS_MATRIX_1_ADDR 4 ++#define RADEON_VS_MATRIX_2_ADDR 8 ++#define RADEON_VS_MATRIX_3_ADDR 12 ++#define RADEON_VS_MATRIX_4_ADDR 16 ++#define RADEON_VS_MATRIX_5_ADDR 20 ++#define RADEON_VS_MATRIX_6_ADDR 24 ++#define RADEON_VS_MATRIX_7_ADDR 28 ++#define RADEON_VS_MATRIX_8_ADDR 32 ++#define RADEON_VS_MATRIX_9_ADDR 36 ++#define RADEON_VS_MATRIX_10_ADDR 40 ++#define RADEON_VS_MATRIX_11_ADDR 44 ++#define RADEON_VS_MATRIX_12_ADDR 48 ++#define RADEON_VS_MATRIX_13_ADDR 52 ++#define RADEON_VS_MATRIX_14_ADDR 56 ++#define RADEON_VS_MATRIX_15_ADDR 60 ++#define RADEON_VS_LIGHT_AMBIENT_ADDR 64 ++#define RADEON_VS_LIGHT_DIFFUSE_ADDR 72 ++#define RADEON_VS_LIGHT_SPECULAR_ADDR 80 ++#define RADEON_VS_LIGHT_DIRPOS_ADDR 88 ++#define RADEON_VS_LIGHT_HWVSPOT_ADDR 96 ++#define RADEON_VS_LIGHT_ATTENUATION_ADDR 104 ++#define 
RADEON_VS_MATRIX_EYE2CLIP_ADDR 112 ++#define RADEON_VS_UCP_ADDR 116 ++#define RADEON_VS_GLOBAL_AMBIENT_ADDR 122 ++#define RADEON_VS_FOG_PARAM_ADDR 123 ++#define RADEON_VS_EYE_VECTOR_ADDR 124 ++ ++#define RADEON_SS_LIGHT_DCD_ADDR 0 ++#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR 8 ++#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR 16 ++#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR 24 ++#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR 32 ++#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR 48 ++#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR 49 ++#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR 50 ++#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR 51 ++#define RADEON_SS_SHININESS 60 ++ ++#define RADEON_TV_MASTER_CNTL 0x0800 ++# define RADEON_TV_ASYNC_RST (1 << 0) ++# define RADEON_CRT_ASYNC_RST (1 << 1) ++# define RADEON_RESTART_PHASE_FIX (1 << 3) ++# define RADEON_TV_FIFO_ASYNC_RST (1 << 4) ++# define RADEON_VIN_ASYNC_RST (1 << 5) ++# define RADEON_AUD_ASYNC_RST (1 << 6) ++# define RADEON_DVS_ASYNC_RST (1 << 7) ++# define RADEON_CRT_FIFO_CE_EN (1 << 9) ++# define RADEON_TV_FIFO_CE_EN (1 << 10) ++# define RADEON_RE_SYNC_NOW_SEL_MASK (3 << 14) ++# define RADEON_TVCLK_ALWAYS_ONb (1 << 30) ++# define RADEON_TV_ON (1 << 31) ++#define RADEON_TV_PRE_DAC_MUX_CNTL 0x0888 ++# define RADEON_Y_RED_EN (1 << 0) ++# define RADEON_C_GRN_EN (1 << 1) ++# define RADEON_CMP_BLU_EN (1 << 2) ++# define RADEON_DAC_DITHER_EN (1 << 3) ++# define RADEON_RED_MX_FORCE_DAC_DATA (6 << 4) ++# define RADEON_GRN_MX_FORCE_DAC_DATA (6 << 8) ++# define RADEON_BLU_MX_FORCE_DAC_DATA (6 << 12) ++# define RADEON_TV_FORCE_DAC_DATA_SHIFT 16 ++#define RADEON_TV_RGB_CNTL 0x0804 ++# define RADEON_SWITCH_TO_BLUE (1 << 4) ++# define RADEON_RGB_DITHER_EN (1 << 5) ++# define RADEON_RGB_SRC_SEL_MASK (3 << 8) ++# define RADEON_RGB_SRC_SEL_CRTC1 (0 << 8) ++# define RADEON_RGB_SRC_SEL_RMX (1 << 8) ++# define RADEON_RGB_SRC_SEL_CRTC2 (2 << 8) ++# define RADEON_RGB_CONVERT_BY_PASS (1 << 10) ++# define RADEON_UVRAM_READ_MARGIN_SHIFT 16 ++# define 
RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT 20 ++# define RADEON_TVOUT_SCALE_EN (1 << 26) ++#define RADEON_TV_SYNC_CNTL 0x0808 ++# define RADEON_SYNC_OE (1 << 0) ++# define RADEON_SYNC_OUT (1 << 1) ++# define RADEON_SYNC_IN (1 << 2) ++# define RADEON_SYNC_PUB (1 << 3) ++# define RADEON_SYNC_PD (1 << 4) ++# define RADEON_TV_SYNC_IO_DRIVE (1 << 5) ++#define RADEON_TV_HTOTAL 0x080c ++#define RADEON_TV_HDISP 0x0810 ++#define RADEON_TV_HSTART 0x0818 ++#define RADEON_TV_HCOUNT 0x081C ++#define RADEON_TV_VTOTAL 0x0820 ++#define RADEON_TV_VDISP 0x0824 ++#define RADEON_TV_VCOUNT 0x0828 ++#define RADEON_TV_FTOTAL 0x082c ++#define RADEON_TV_FCOUNT 0x0830 ++#define RADEON_TV_FRESTART 0x0834 ++#define RADEON_TV_HRESTART 0x0838 ++#define RADEON_TV_VRESTART 0x083c ++#define RADEON_TV_HOST_READ_DATA 0x0840 ++#define RADEON_TV_HOST_WRITE_DATA 0x0844 ++#define RADEON_TV_HOST_RD_WT_CNTL 0x0848 ++# define RADEON_HOST_FIFO_RD (1 << 12) ++# define RADEON_HOST_FIFO_RD_ACK (1 << 13) ++# define RADEON_HOST_FIFO_WT (1 << 14) ++# define RADEON_HOST_FIFO_WT_ACK (1 << 15) ++#define RADEON_TV_VSCALER_CNTL1 0x084c ++# define RADEON_UV_INC_MASK 0xffff ++# define RADEON_UV_INC_SHIFT 0 ++# define RADEON_Y_W_EN (1 << 24) ++# define RADEON_RESTART_FIELD (1 << 29) /* restart on field 0 */ ++# define RADEON_Y_DEL_W_SIG_SHIFT 26 ++#define RADEON_TV_TIMING_CNTL 0x0850 ++# define RADEON_H_INC_MASK 0xfff ++# define RADEON_H_INC_SHIFT 0 ++# define RADEON_REQ_Y_FIRST (1 << 19) ++# define RADEON_FORCE_BURST_ALWAYS (1 << 21) ++# define RADEON_UV_POST_SCALE_BYPASS (1 << 23) ++# define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24 ++#define RADEON_TV_VSCALER_CNTL2 0x0854 ++# define RADEON_DITHER_MODE (1 << 0) ++# define RADEON_Y_OUTPUT_DITHER_EN (1 << 1) ++# define RADEON_UV_OUTPUT_DITHER_EN (1 << 2) ++# define RADEON_UV_TO_BUF_DITHER_EN (1 << 3) ++#define RADEON_TV_Y_FALL_CNTL 0x0858 ++# define RADEON_Y_FALL_PING_PONG (1 << 16) ++# define RADEON_Y_COEF_EN (1 << 17) ++#define RADEON_TV_Y_RISE_CNTL 0x085c ++# define 
RADEON_Y_RISE_PING_PONG (1 << 16) ++#define RADEON_TV_Y_SAW_TOOTH_CNTL 0x0860 ++#define RADEON_TV_UPSAMP_AND_GAIN_CNTL 0x0864 ++# define RADEON_YUPSAMP_EN (1 << 0) ++# define RADEON_UVUPSAMP_EN (1 << 2) ++#define RADEON_TV_GAIN_LIMIT_SETTINGS 0x0868 ++# define RADEON_Y_GAIN_LIMIT_SHIFT 0 ++# define RADEON_UV_GAIN_LIMIT_SHIFT 16 ++#define RADEON_TV_LINEAR_GAIN_SETTINGS 0x086c ++# define RADEON_Y_GAIN_SHIFT 0 ++# define RADEON_UV_GAIN_SHIFT 16 ++#define RADEON_TV_MODULATOR_CNTL1 0x0870 ++# define RADEON_YFLT_EN (1 << 2) ++# define RADEON_UVFLT_EN (1 << 3) ++# define RADEON_ALT_PHASE_EN (1 << 6) ++# define RADEON_SYNC_TIP_LEVEL (1 << 7) ++# define RADEON_BLANK_LEVEL_SHIFT 8 ++# define RADEON_SET_UP_LEVEL_SHIFT 16 ++# define RADEON_SLEW_RATE_LIMIT (1 << 23) ++# define RADEON_CY_FILT_BLEND_SHIFT 28 ++#define RADEON_TV_MODULATOR_CNTL2 0x0874 ++# define RADEON_TV_U_BURST_LEVEL_MASK 0x1ff ++# define RADEON_TV_V_BURST_LEVEL_MASK 0x1ff ++# define RADEON_TV_V_BURST_LEVEL_SHIFT 16 ++#define RADEON_TV_CRC_CNTL 0x0890 ++#define RADEON_TV_UV_ADR 0x08ac ++# define RADEON_MAX_UV_ADR_MASK 0x000000ff ++# define RADEON_MAX_UV_ADR_SHIFT 0 ++# define RADEON_TABLE1_BOT_ADR_MASK 0x0000ff00 ++# define RADEON_TABLE1_BOT_ADR_SHIFT 8 ++# define RADEON_TABLE3_TOP_ADR_MASK 0x00ff0000 ++# define RADEON_TABLE3_TOP_ADR_SHIFT 16 ++# define RADEON_HCODE_TABLE_SEL_MASK 0x06000000 ++# define RADEON_HCODE_TABLE_SEL_SHIFT 25 ++# define RADEON_VCODE_TABLE_SEL_MASK 0x18000000 ++# define RADEON_VCODE_TABLE_SEL_SHIFT 27 ++# define RADEON_TV_MAX_FIFO_ADDR 0x1a7 ++# define RADEON_TV_MAX_FIFO_ADDR_INTERNAL 0x1ff ++#define RADEON_TV_PLL_FINE_CNTL 0x0020 /* PLL */ ++#define RADEON_TV_PLL_CNTL 0x0021 /* PLL */ ++# define RADEON_TV_M0LO_MASK 0xff ++# define RADEON_TV_M0HI_MASK 0x7 ++# define RADEON_TV_M0HI_SHIFT 18 ++# define RADEON_TV_N0LO_MASK 0x1ff ++# define RADEON_TV_N0LO_SHIFT 8 ++# define RADEON_TV_N0HI_MASK 0x3 ++# define RADEON_TV_N0HI_SHIFT 21 ++# define RADEON_TV_P_MASK 0xf ++# define RADEON_TV_P_SHIFT 
24 ++# define RADEON_TV_SLIP_EN (1 << 23) ++# define RADEON_TV_DTO_EN (1 << 28) ++#define RADEON_TV_PLL_CNTL1 0x0022 /* PLL */ ++# define RADEON_TVPLL_RESET (1 << 1) ++# define RADEON_TVPLL_SLEEP (1 << 3) ++# define RADEON_TVPLL_REFCLK_SEL (1 << 4) ++# define RADEON_TVPCP_SHIFT 8 ++# define RADEON_TVPCP_MASK (7 << 8) ++# define RADEON_TVPVG_SHIFT 11 ++# define RADEON_TVPVG_MASK (7 << 11) ++# define RADEON_TVPDC_SHIFT 14 ++# define RADEON_TVPDC_MASK (3 << 14) ++# define RADEON_TVPLL_TEST_DIS (1 << 31) ++# define RADEON_TVCLK_SRC_SEL_TVPLL (1 << 30) ++ ++#define RS400_DISP2_REQ_CNTL1 0xe30 ++# define RS400_DISP2_START_REQ_LEVEL_SHIFT 0 ++# define RS400_DISP2_START_REQ_LEVEL_MASK 0x3ff ++# define RS400_DISP2_STOP_REQ_LEVEL_SHIFT 12 ++# define RS400_DISP2_STOP_REQ_LEVEL_MASK 0x3ff ++# define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT 22 ++# define RS400_DISP2_ALLOW_FID_LEVEL_MASK 0x3ff ++#define RS400_DISP2_REQ_CNTL2 0xe34 ++# define RS400_DISP2_CRITICAL_POINT_START_SHIFT 12 ++# define RS400_DISP2_CRITICAL_POINT_START_MASK 0x3ff ++# define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT 22 ++# define RS400_DISP2_CRITICAL_POINT_STOP_MASK 0x3ff ++#define RS400_DMIF_MEM_CNTL1 0xe38 ++# define RS400_DISP2_START_ADR_SHIFT 0 ++# define RS400_DISP2_START_ADR_MASK 0x3ff ++# define RS400_DISP1_CRITICAL_POINT_START_SHIFT 12 ++# define RS400_DISP1_CRITICAL_POINT_START_MASK 0x3ff ++# define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT 22 ++# define RS400_DISP1_CRITICAL_POINT_STOP_MASK 0x3ff ++#define RS400_DISP1_REQ_CNTL1 0xe3c ++# define RS400_DISP1_START_REQ_LEVEL_SHIFT 0 ++# define RS400_DISP1_START_REQ_LEVEL_MASK 0x3ff ++# define RS400_DISP1_STOP_REQ_LEVEL_SHIFT 12 ++# define RS400_DISP1_STOP_REQ_LEVEL_MASK 0x3ff ++# define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT 22 ++# define RS400_DISP1_ALLOW_FID_LEVEL_MASK 0x3ff ++ ++#define RS690_MC_INDEX 0x78 ++# define RS690_MC_INDEX_MASK 0x1ff ++# define RS690_MC_INDEX_WR_EN (1 << 9) ++# define RS690_MC_INDEX_WR_ACK 0x7f ++#define RS690_MC_DATA 0x7c ++ ++#define 
RS690_MC_FB_LOCATION 0x100 ++#define RS690_MC_AGP_LOCATION 0x101 ++#define RS690_MC_AGP_BASE 0x102 ++#define RS690_MC_AGP_BASE_2 0x103 ++#define RS690_MC_STATUS 0x90 ++#define RS690_MC_STATUS_IDLE (1 << 0) ++ ++#define RS600_MC_INDEX 0x78 ++# define RS600_MC_INDEX_MASK 0xff ++# define RS600_MC_INDEX_WR_EN (1 << 8) ++# define RS600_MC_INDEX_WR_ACK 0xff ++#define RS600_MC_DATA 0x7c ++ ++#define RS600_MC_FB_LOCATION 0xA ++#define RS600_MC_STATUS 0x0 ++#define RS600_MC_STATUS_IDLE (1 << 0) ++ ++#define AVIVO_MC_INDEX 0x0070 ++#define R520_MC_STATUS 0x00 ++#define R520_MC_STATUS_IDLE (1<<1) ++#define RV515_MC_STATUS 0x08 ++#define RV515_MC_STATUS_IDLE (1<<4) ++#define AVIVO_MC_DATA 0x0074 ++ ++#define RV515_MC_FB_LOCATION 0x1 ++#define RV515_MC_AGP_LOCATION 0x2 ++#define RV515_MC_AGP_BASE 0x3 ++#define RV515_MC_AGP_BASE_2 0x4 ++#define RV515_MC_CNTL 0x5 ++# define RV515_MEM_NUM_CHANNELS_MASK 0x3 ++#define R520_MC_FB_LOCATION 0x4 ++#define R520_MC_AGP_LOCATION 0x5 ++#define R520_MC_AGP_BASE 0x6 ++#define R520_MC_AGP_BASE_2 0x7 ++#define R520_MC_CNTL0 0x8 ++# define R520_MEM_NUM_CHANNELS_MASK (0x3 << 24) ++# define R520_MEM_NUM_CHANNELS_SHIFT 24 ++# define R520_MC_CHANNEL_SIZE (1 << 23) ++ ++#define R600_RAMCFG 0x2408 ++# define R600_CHANSIZE (1 << 7) ++# define R600_CHANSIZE_OVERRIDE (1 << 10) ++ ++#define AVIVO_HDP_FB_LOCATION 0x134 ++ ++#define AVIVO_VGA_RENDER_CONTROL 0x0300 ++# define AVIVO_VGA_VSTATUS_CNTL_MASK (3 << 16) ++#define AVIVO_D1VGA_CONTROL 0x0330 ++# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0) ++# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8) ++# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9) ++# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10) ++# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16) ++# define AVIVO_DVGA_CONTROL_ROTATE (1<<24) ++#define AVIVO_D2VGA_CONTROL 0x0338 ++ ++#define AVIVO_EXT1_PPLL_REF_DIV_SRC 0x400 ++#define AVIVO_EXT1_PPLL_REF_DIV 0x404 ++#define AVIVO_EXT1_PPLL_UPDATE_LOCK 0x408 ++#define 
AVIVO_EXT1_PPLL_UPDATE_CNTL 0x40c ++ ++#define AVIVO_EXT2_PPLL_REF_DIV_SRC 0x410 ++#define AVIVO_EXT2_PPLL_REF_DIV 0x414 ++#define AVIVO_EXT2_PPLL_UPDATE_LOCK 0x418 ++#define AVIVO_EXT2_PPLL_UPDATE_CNTL 0x41c ++ ++#define AVIVO_EXT1_PPLL_FB_DIV 0x430 ++#define AVIVO_EXT2_PPLL_FB_DIV 0x434 ++ ++#define AVIVO_EXT1_PPLL_POST_DIV_SRC 0x438 ++#define AVIVO_EXT1_PPLL_POST_DIV 0x43c ++ ++#define AVIVO_EXT2_PPLL_POST_DIV_SRC 0x440 ++#define AVIVO_EXT2_PPLL_POST_DIV 0x444 ++ ++#define AVIVO_EXT1_PPLL_CNTL 0x448 ++#define AVIVO_EXT2_PPLL_CNTL 0x44c ++ ++#define AVIVO_P1PLL_CNTL 0x450 ++#define AVIVO_P2PLL_CNTL 0x454 ++#define AVIVO_P1PLL_INT_SS_CNTL 0x458 ++#define AVIVO_P2PLL_INT_SS_CNTL 0x45c ++#define AVIVO_P1PLL_TMDSA_CNTL 0x460 ++#define AVIVO_P2PLL_LVTMA_CNTL 0x464 ++ ++#define AVIVO_PCLK_CRTC1_CNTL 0x480 ++#define AVIVO_PCLK_CRTC2_CNTL 0x484 ++ ++#define AVIVO_D1CRTC_H_TOTAL 0x6000 ++#define AVIVO_D1CRTC_H_BLANK_START_END 0x6004 ++#define AVIVO_D1CRTC_H_SYNC_A 0x6008 ++#define AVIVO_D1CRTC_H_SYNC_A_CNTL 0x600c ++#define AVIVO_D1CRTC_H_SYNC_B 0x6010 ++#define AVIVO_D1CRTC_H_SYNC_B_CNTL 0x6014 ++ ++#define AVIVO_D1CRTC_V_TOTAL 0x6020 ++#define AVIVO_D1CRTC_V_BLANK_START_END 0x6024 ++#define AVIVO_D1CRTC_V_SYNC_A 0x6028 ++#define AVIVO_D1CRTC_V_SYNC_A_CNTL 0x602c ++#define AVIVO_D1CRTC_V_SYNC_B 0x6030 ++#define AVIVO_D1CRTC_V_SYNC_B_CNTL 0x6034 ++ ++#define AVIVO_D1CRTC_CONTROL 0x6080 ++# define AVIVO_CRTC_EN (1 << 0) ++#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 ++#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 ++#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c ++#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 ++ ++/* master controls */ ++#define AVIVO_DC_CRTC_MASTER_EN 0x60f8 ++#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc ++ ++#define AVIVO_D1GRPH_ENABLE 0x6100 ++#define AVIVO_D1GRPH_CONTROL 0x6104 ++# define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP (0 << 0) ++# define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP (1 << 0) ++# define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP (2 << 0) ++# define 
AVIVO_D1GRPH_CONTROL_DEPTH_64BPP (3 << 0) ++ ++# define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED (0 << 8) ++ ++# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555 (0 << 8) ++# define AVIVO_D1GRPH_CONTROL_16BPP_RGB565 (1 << 8) ++# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444 (2 << 8) ++# define AVIVO_D1GRPH_CONTROL_16BPP_AI88 (3 << 8) ++# define AVIVO_D1GRPH_CONTROL_16BPP_MONO16 (4 << 8) ++ ++# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888 (0 << 8) ++# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010 (1 << 8) ++# define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL (2 << 8) ++# define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010 (3 << 8) ++ ++ ++# define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616 (0 << 8) ++ ++# define AVIVO_D1GRPH_SWAP_RB (1 << 16) ++# define AVIVO_D1GRPH_TILED (1 << 20) ++# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21) ++ ++#define AVIVO_D1GRPH_LUT_SEL 0x6108 ++#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 ++#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 ++#define AVIVO_D1GRPH_PITCH 0x6120 ++#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124 ++#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128 ++#define AVIVO_D1GRPH_X_START 0x612c ++#define AVIVO_D1GRPH_Y_START 0x6130 ++#define AVIVO_D1GRPH_X_END 0x6134 ++#define AVIVO_D1GRPH_Y_END 0x6138 ++#define AVIVO_D1GRPH_UPDATE 0x6144 ++# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16) ++#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148 ++ ++#define AVIVO_D1CUR_CONTROL 0x6400 ++# define AVIVO_D1CURSOR_EN (1 << 0) ++# define AVIVO_D1CURSOR_MODE_SHIFT 8 ++# define AVIVO_D1CURSOR_MODE_MASK (3 << 8) ++# define AVIVO_D1CURSOR_MODE_24BPP 2 ++#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408 ++#define AVIVO_D1CUR_SIZE 0x6410 ++#define AVIVO_D1CUR_POSITION 0x6414 ++#define AVIVO_D1CUR_HOT_SPOT 0x6418 ++#define AVIVO_D1CUR_UPDATE 0x6424 ++# define AVIVO_D1CURSOR_UPDATE_LOCK (1 << 16) ++ ++#define AVIVO_DC_LUT_RW_SELECT 0x6480 ++#define AVIVO_DC_LUT_RW_MODE 0x6484 ++#define AVIVO_DC_LUT_RW_INDEX 0x6488 ++#define AVIVO_DC_LUT_SEQ_COLOR 0x648c ++#define AVIVO_DC_LUT_PWL_DATA 
0x6490 ++#define AVIVO_DC_LUT_30_COLOR 0x6494 ++#define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498 ++#define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c ++#define AVIVO_DC_LUT_AUTOFILL 0x64a0 ++ ++#define AVIVO_DC_LUTA_CONTROL 0x64c0 ++#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4 ++#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8 ++#define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc ++#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0 ++#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4 ++#define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8 ++ ++#define AVIVO_D1MODE_DATA_FORMAT 0x6528 ++# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) ++#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C ++#define AVIVO_D1MODE_VIEWPORT_START 0x6580 ++#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 ++#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 ++#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM 0x658c ++ ++#define AVIVO_D1SCL_SCALER_ENABLE 0x6590 ++#define AVIVO_D1SCL_SCALER_TAP_CONTROL 0x6594 ++#define AVIVO_D1SCL_UPDATE 0x65cc ++# define AVIVO_D1SCL_UPDATE_LOCK (1 << 16) ++ ++/* second crtc */ ++#define AVIVO_D2CRTC_H_TOTAL 0x6800 ++#define AVIVO_D2CRTC_H_BLANK_START_END 0x6804 ++#define AVIVO_D2CRTC_H_SYNC_A 0x6808 ++#define AVIVO_D2CRTC_H_SYNC_A_CNTL 0x680c ++#define AVIVO_D2CRTC_H_SYNC_B 0x6810 ++#define AVIVO_D2CRTC_H_SYNC_B_CNTL 0x6814 ++ ++#define AVIVO_D2CRTC_V_TOTAL 0x6820 ++#define AVIVO_D2CRTC_V_BLANK_START_END 0x6824 ++#define AVIVO_D2CRTC_V_SYNC_A 0x6828 ++#define AVIVO_D2CRTC_V_SYNC_A_CNTL 0x682c ++#define AVIVO_D2CRTC_V_SYNC_B 0x6830 ++#define AVIVO_D2CRTC_V_SYNC_B_CNTL 0x6834 ++ ++#define AVIVO_D2CRTC_CONTROL 0x6880 ++#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 ++#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 ++#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c ++#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 ++ ++#define AVIVO_D2GRPH_ENABLE 0x6900 ++#define AVIVO_D2GRPH_CONTROL 0x6904 ++#define AVIVO_D2GRPH_LUT_SEL 0x6908 ++#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910 ++#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS 
0x6918 ++#define AVIVO_D2GRPH_PITCH 0x6920 ++#define AVIVO_D2GRPH_SURFACE_OFFSET_X 0x6924 ++#define AVIVO_D2GRPH_SURFACE_OFFSET_Y 0x6928 ++#define AVIVO_D2GRPH_X_START 0x692c ++#define AVIVO_D2GRPH_Y_START 0x6930 ++#define AVIVO_D2GRPH_X_END 0x6934 ++#define AVIVO_D2GRPH_Y_END 0x6938 ++#define AVIVO_D2GRPH_UPDATE 0x6944 ++#define AVIVO_D2GRPH_FLIP_CONTROL 0x6948 ++ ++#define AVIVO_D2CUR_CONTROL 0x6c00 ++#define AVIVO_D2CUR_SURFACE_ADDRESS 0x6c08 ++#define AVIVO_D2CUR_SIZE 0x6c10 ++#define AVIVO_D2CUR_POSITION 0x6c14 ++ ++#define AVIVO_D2MODE_VIEWPORT_START 0x6d80 ++#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 ++#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 ++#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM 0x6d8c ++ ++#define AVIVO_D2SCL_SCALER_ENABLE 0x6d90 ++#define AVIVO_D2SCL_SCALER_TAP_CONTROL 0x6d94 ++ ++#define AVIVO_DDIA_BIT_DEPTH_CONTROL 0x7214 ++ ++#define AVIVO_DACA_ENABLE 0x7800 ++# define AVIVO_DAC_ENABLE (1 << 0) ++#define AVIVO_DACA_SOURCE_SELECT 0x7804 ++# define AVIVO_DAC_SOURCE_CRTC1 (0 << 0) ++# define AVIVO_DAC_SOURCE_CRTC2 (1 << 0) ++# define AVIVO_DAC_SOURCE_TV (2 << 0) ++ ++#define AVIVO_DACA_FORCE_OUTPUT_CNTL 0x783c ++# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0) ++# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8) ++# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0) ++# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1) ++# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2) ++# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24) ++#define AVIVO_DACA_POWERDOWN 0x7850 ++# define AVIVO_DACA_POWERDOWN_POWERDOWN (1 << 0) ++# define AVIVO_DACA_POWERDOWN_BLUE (1 << 8) ++# define AVIVO_DACA_POWERDOWN_GREEN (1 << 16) ++# define AVIVO_DACA_POWERDOWN_RED (1 << 24) ++ ++#define AVIVO_DACB_ENABLE 0x7a00 ++#define AVIVO_DACB_SOURCE_SELECT 0x7a04 ++#define AVIVO_DACB_FORCE_OUTPUT_CNTL 0x7a3c ++# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0) ++# define 
AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8) ++# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0) ++# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1) ++# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2) ++# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24) ++#define AVIVO_DACB_POWERDOWN 0x7a50 ++# define AVIVO_DACB_POWERDOWN_POWERDOWN (1 << 0) ++# define AVIVO_DACB_POWERDOWN_BLUE (1 << 8) ++# define AVIVO_DACB_POWERDOWN_GREEN (1 << 16) ++# define AVIVO_DACB_POWERDOWN_RED ++ ++#define AVIVO_TMDSA_CNTL 0x7880 ++# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0) ++# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4) ++# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8) ++# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12) ++# define AVIVO_TMDSA_CNTL_PIXEL_ENCODING (1 << 16) ++# define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE (1 << 24) ++# define AVIVO_TMDSA_CNTL_SWAP (1 << 28) ++#define AVIVO_TMDSA_SOURCE_SELECT 0x7884 ++/* 78a8 appears to be some kind of (reasonably tolerant) clock? ++ * 78d0 definitely hits the transmitter, definitely clock. */ ++/* MYSTERY1 This appears to control dithering? 
*/ ++#define AVIVO_TMDSA_BIT_DEPTH_CONTROL 0x7894 ++# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0) ++# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4) ++# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8) ++# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12) ++# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16) ++# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20) ++# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24) ++# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26) ++#define AVIVO_TMDSA_DCBALANCER_CONTROL 0x78d0 ++# define AVIVO_TMDSA_DCBALANCER_CONTROL_EN (1 << 0) ++# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN (1 << 8) ++# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16) ++# define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE (1 << 24) ++#define AVIVO_TMDSA_DATA_SYNCHRONIZATION 0x78d8 ++# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0) ++# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8) ++#define AVIVO_TMDSA_CLOCK_ENABLE 0x7900 ++#define AVIVO_TMDSA_TRANSMITTER_ENABLE 0x7904 ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE (1 << 0) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE (1 << 8) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK (1 << 16) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17) ++# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18) ++ ++#define AVIVO_TMDSA_TRANSMITTER_CONTROL 0x7910 ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 
0) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK (1 << 8) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK (1 << 14) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29) ++# define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31) ++ ++#define AVIVO_LVTMA_CNTL 0x7a80 ++# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0) ++# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4) ++# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8) ++# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12) ++# define AVIVO_LVTMA_CNTL_PIXEL_ENCODING (1 << 16) ++# define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE (1 << 24) ++# define AVIVO_LVTMA_CNTL_SWAP (1 << 28) ++#define AVIVO_LVTMA_SOURCE_SELECT 0x7a84 ++#define AVIVO_LVTMA_COLOR_FORMAT 0x7a88 ++#define AVIVO_LVTMA_BIT_DEPTH_CONTROL 0x7a94 ++# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0) ++# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4) ++# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8) ++# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12) ++# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16) ++# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20) ++# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24) ++# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26) ++ ++ ++ ++#define AVIVO_LVTMA_DCBALANCER_CONTROL 0x7ad0 ++# define 
AVIVO_LVTMA_DCBALANCER_CONTROL_EN (1 << 0) ++# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN (1 << 8) ++# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16) ++# define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE (1 << 24) ++ ++#define AVIVO_LVTMA_DATA_SYNCHRONIZATION 0x78d8 ++# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0) ++# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8) ++#define R500_LVTMA_CLOCK_ENABLE 0x7b00 ++#define R600_LVTMA_CLOCK_ENABLE 0x7b04 ++ ++#define R500_LVTMA_TRANSMITTER_ENABLE 0x7b04 ++#define R600_LVTMA_TRANSMITTER_ENABLE 0x7b08 ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN (1 << 5) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN (1 << 9) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17) ++# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18) ++ ++#define R500_LVTMA_TRANSMITTER_CONTROL 0x7b10 ++#define R600_LVTMA_TRANSMITTER_CONTROL 0x7b14 ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK (1 << 8) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK (1 << 14) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15) ++# define 
AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29) ++# define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31) ++ ++#define R500_LVTMA_PWRSEQ_CNTL 0x7af0 ++#define R600_LVTMA_PWRSEQ_CNTL 0x7af4 ++# define AVIVO_LVTMA_PWRSEQ_EN (1 << 0) ++# define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK (1 << 2) ++# define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK (1 << 3) ++# define AVIVO_LVTMA_PWRSEQ_TARGET_STATE (1 << 4) ++# define AVIVO_LVTMA_SYNCEN (1 << 8) ++# define AVIVO_LVTMA_SYNCEN_OVRD (1 << 9) ++# define AVIVO_LVTMA_SYNCEN_POL (1 << 10) ++# define AVIVO_LVTMA_DIGON (1 << 16) ++# define AVIVO_LVTMA_DIGON_OVRD (1 << 17) ++# define AVIVO_LVTMA_DIGON_POL (1 << 18) ++# define AVIVO_LVTMA_BLON (1 << 24) ++# define AVIVO_LVTMA_BLON_OVRD (1 << 25) ++# define AVIVO_LVTMA_BLON_POL (1 << 26) ++ ++#define R500_LVTMA_PWRSEQ_STATE 0x7af4 ++#define R600_LVTMA_PWRSEQ_STATE 0x7af8 ++# define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R (1 << 0) ++# define AVIVO_LVTMA_PWRSEQ_STATE_DIGON (1 << 1) ++# define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN (1 << 2) ++# define AVIVO_LVTMA_PWRSEQ_STATE_BLON (1 << 3) ++# define AVIVO_LVTMA_PWRSEQ_STATE_DONE (1 << 4) ++# define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT (8) ++ ++#define AVIVO_LVDS_BACKLIGHT_CNTL 0x7af8 ++# define AVIVO_LVDS_BACKLIGHT_CNTL_EN (1 << 0) ++# define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK 0x0000ff00 ++# define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT 8 ++ ++#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 ++ ++#define AVIVO_GPIO_0 0x7e30 ++#define AVIVO_GPIO_1 0x7e40 ++#define AVIVO_GPIO_2 0x7e50 ++#define AVIVO_GPIO_3 0x7e60 ++ ++#define AVIVO_DC_GPIO_HPD_Y 0x7e9c ++ ++#define AVIVO_I2C_STATUS 0x7d30 ++# define AVIVO_I2C_STATUS_DONE (1 << 0) ++# define AVIVO_I2C_STATUS_NACK (1 << 1) ++# define AVIVO_I2C_STATUS_HALT (1 << 2) ++# define AVIVO_I2C_STATUS_GO (1 << 3) ++# define AVIVO_I2C_STATUS_MASK 0x7 ++/* If radeon_mm_i2c is to be 
believed, this is HALT, NACK, and maybe ++ * DONE? */ ++# define AVIVO_I2C_STATUS_CMD_RESET 0x7 ++# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3) ++#define AVIVO_I2C_STOP 0x7d34 ++#define AVIVO_I2C_START_CNTL 0x7d38 ++# define AVIVO_I2C_START (1 << 8) ++# define AVIVO_I2C_CONNECTOR0 (0 << 16) ++# define AVIVO_I2C_CONNECTOR1 (1 << 16) ++#define R520_I2C_START (1<<0) ++#define R520_I2C_STOP (1<<1) ++#define R520_I2C_RX (1<<2) ++#define R520_I2C_EN (1<<8) ++#define R520_I2C_DDC1 (0<<16) ++#define R520_I2C_DDC2 (1<<16) ++#define R520_I2C_DDC3 (2<<16) ++#define R520_I2C_DDC_MASK (3<<16) ++#define AVIVO_I2C_CONTROL2 0x7d3c ++# define AVIVO_I2C_7D3C_SIZE_SHIFT 8 ++# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8) ++#define AVIVO_I2C_CONTROL3 0x7d40 ++/* Reading is done 4 bytes at a time: read the bottom 8 bits from ++ * 7d44, four times in a row. ++ * Writing is a little more complex. First write DATA with ++ * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic ++ * magic number, zz is, I think, the slave address, and yy is the byte ++ * you want to write. 
*/ ++#define AVIVO_I2C_DATA 0x7d44 ++#define R520_I2C_ADDR_COUNT_MASK (0x7) ++#define R520_I2C_DATA_COUNT_SHIFT (8) ++#define R520_I2C_DATA_COUNT_MASK (0xF00) ++#define AVIVO_I2C_CNTL 0x7d50 ++# define AVIVO_I2C_EN (1 << 0) ++# define AVIVO_I2C_RESET (1 << 8) ++ ++#define R600_GENERAL_PWRMGT 0x618 ++# define R600_OPEN_DRAIN_PADS (1 << 11) ++ ++#define R600_LOWER_GPIO_ENABLE 0x710 ++#define R600_CTXSW_VID_LOWER_GPIO_CNTL 0x718 ++#define R600_HIGH_VID_LOWER_GPIO_CNTL 0x71c ++#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 ++#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 ++ ++#define R600_MC_VM_FB_LOCATION 0x2180 ++#define R600_MC_VM_AGP_TOP 0x2184 ++#define R600_MC_VM_AGP_BOT 0x2188 ++#define R600_MC_VM_AGP_BASE 0x218c ++#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190 ++#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194 ++#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198 ++ ++#define R700_MC_VM_FB_LOCATION 0x2024 ++ ++#define R600_HDP_NONSURFACE_BASE 0x2c04 ++ ++#define R600_BUS_CNTL 0x5420 ++#define R600_CONFIG_CNTL 0x5424 ++#define R600_CONFIG_MEMSIZE 0x5428 ++#define R600_CONFIG_F0_BASE 0x542C ++#define R600_CONFIG_APER_SIZE 0x5430 ++ ++#define R600_ROM_CNTL 0x1600 ++# define R600_SCK_OVERWRITE (1 << 1) ++# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28 ++# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28) ++ ++#define R600_BIOS_0_SCRATCH 0x1724 ++#define R600_BIOS_1_SCRATCH 0x1728 ++#define R600_BIOS_2_SCRATCH 0x172c ++#define R600_BIOS_3_SCRATCH 0x1730 ++#define R600_BIOS_4_SCRATCH 0x1734 ++#define R600_BIOS_5_SCRATCH 0x1738 ++#define R600_BIOS_6_SCRATCH 0x173c ++#define R600_BIOS_7_SCRATCH 0x1740 ++ ++#define R300_GB_TILE_CONFIG 0x4018 ++# define R300_ENABLE_TILING (1 << 0) ++# define R300_PIPE_COUNT_RV350 (0 << 1) ++# define R300_PIPE_COUNT_R300 (3 << 1) ++# define R300_PIPE_COUNT_R420_3P (6 << 1) ++# define R300_PIPE_COUNT_R420 (7 << 1) ++# define R300_TILE_SIZE_8 (0 << 4) ++# define R300_TILE_SIZE_16 (1 << 4) ++# define R300_TILE_SIZE_32 (2 << 4) ++# 
define R300_SUBPIXEL_1_12 (0 << 16) ++# define R300_SUBPIXEL_1_16 (1 << 16) ++#define R300_GB_SELECT 0x401c ++#define R300_GB_ENABLE 0x4008 ++#define R300_GB_AA_CONFIG 0x4020 ++#define R400_GB_PIPE_SELECT 0x402c ++#define R300_GB_MSPOS0 0x4010 ++# define R300_MS_X0_SHIFT 0 ++# define R300_MS_Y0_SHIFT 4 ++# define R300_MS_X1_SHIFT 8 ++# define R300_MS_Y1_SHIFT 12 ++# define R300_MS_X2_SHIFT 16 ++# define R300_MS_Y2_SHIFT 20 ++# define R300_MSBD0_Y_SHIFT 24 ++# define R300_MSBD0_X_SHIFT 28 ++#define R300_GB_MSPOS1 0x4014 ++# define R300_MS_X3_SHIFT 0 ++# define R300_MS_Y3_SHIFT 4 ++# define R300_MS_X4_SHIFT 8 ++# define R300_MS_Y4_SHIFT 12 ++# define R300_MS_X5_SHIFT 16 ++# define R300_MS_Y5_SHIFT 20 ++# define R300_MSBD1_SHIFT 24 ++ ++#define R300_GA_ENHANCE 0x4274 ++# define R300_GA_DEADLOCK_CNTL (1 << 0) ++# define R300_GA_FASTSYNC_CNTL (1 << 1) ++ ++#define R300_GA_POLY_MODE 0x4288 ++# define R300_FRONT_PTYPE_POINT (0 << 4) ++# define R300_FRONT_PTYPE_LINE (1 << 4) ++# define R300_FRONT_PTYPE_TRIANGE (2 << 4) ++# define R300_BACK_PTYPE_POINT (0 << 7) ++# define R300_BACK_PTYPE_LINE (1 << 7) ++# define R300_BACK_PTYPE_TRIANGE (2 << 7) ++#define R300_GA_ROUND_MODE 0x428c ++# define R300_GEOMETRY_ROUND_TRUNC (0 << 0) ++# define R300_GEOMETRY_ROUND_NEAREST (1 << 0) ++# define R300_COLOR_ROUND_TRUNC (0 << 2) ++# define R300_COLOR_ROUND_NEAREST (1 << 2) ++#define R300_GA_COLOR_CONTROL 0x4278 ++# define R300_RGB0_SHADING_SOLID (0 << 0) ++# define R300_RGB0_SHADING_FLAT (1 << 0) ++# define R300_RGB0_SHADING_GOURAUD (2 << 0) ++# define R300_ALPHA0_SHADING_SOLID (0 << 2) ++# define R300_ALPHA0_SHADING_FLAT (1 << 2) ++# define R300_ALPHA0_SHADING_GOURAUD (2 << 2) ++# define R300_RGB1_SHADING_SOLID (0 << 4) ++# define R300_RGB1_SHADING_FLAT (1 << 4) ++# define R300_RGB1_SHADING_GOURAUD (2 << 4) ++# define R300_ALPHA1_SHADING_SOLID (0 << 6) ++# define R300_ALPHA1_SHADING_FLAT (1 << 6) ++# define R300_ALPHA1_SHADING_GOURAUD (2 << 6) ++# define R300_RGB2_SHADING_SOLID (0 << 8) 
++# define R300_RGB2_SHADING_FLAT (1 << 8) ++# define R300_RGB2_SHADING_GOURAUD (2 << 8) ++# define R300_ALPHA2_SHADING_SOLID (0 << 10) ++# define R300_ALPHA2_SHADING_FLAT (1 << 10) ++# define R300_ALPHA2_SHADING_GOURAUD (2 << 10) ++# define R300_RGB3_SHADING_SOLID (0 << 12) ++# define R300_RGB3_SHADING_FLAT (1 << 12) ++# define R300_RGB3_SHADING_GOURAUD (2 << 12) ++# define R300_ALPHA3_SHADING_SOLID (0 << 14) ++# define R300_ALPHA3_SHADING_FLAT (1 << 14) ++# define R300_ALPHA3_SHADING_GOURAUD (2 << 14) ++#define R300_GA_OFFSET 0x4290 ++ ++#define R500_SU_REG_DEST 0x42c8 ++ ++#define R300_VAP_CNTL_STATUS 0x2140 ++# define R300_PVS_BYPASS (1 << 8) ++#define R300_VAP_PVS_STATE_FLUSH_REG 0x2284 ++#define R300_VAP_CNTL 0x2080 ++# define R300_PVS_NUM_SLOTS_SHIFT 0 ++# define R300_PVS_NUM_CNTLRS_SHIFT 4 ++# define R300_PVS_NUM_FPUS_SHIFT 8 ++# define R300_VF_MAX_VTX_NUM_SHIFT 18 ++# define R300_GL_CLIP_SPACE_DEF (0 << 22) ++# define R300_DX_CLIP_SPACE_DEF (1 << 22) ++# define R500_TCL_STATE_OPTIMIZATION (1 << 23) ++#define R300_VAP_VTE_CNTL 0x20B0 ++# define R300_VPORT_X_SCALE_ENA (1 << 0) ++# define R300_VPORT_X_OFFSET_ENA (1 << 1) ++# define R300_VPORT_Y_SCALE_ENA (1 << 2) ++# define R300_VPORT_Y_OFFSET_ENA (1 << 3) ++# define R300_VPORT_Z_SCALE_ENA (1 << 4) ++# define R300_VPORT_Z_OFFSET_ENA (1 << 5) ++# define R300_VTX_XY_FMT (1 << 8) ++# define R300_VTX_Z_FMT (1 << 9) ++# define R300_VTX_W0_FMT (1 << 10) ++#define R300_VAP_VTX_STATE_CNTL 0x2180 ++#define R300_VAP_PSC_SGN_NORM_CNTL 0x21DC ++#define R300_VAP_PROG_STREAM_CNTL_0 0x2150 ++# define R300_DATA_TYPE_0_SHIFT 0 ++# define R300_DATA_TYPE_FLOAT_1 0 ++# define R300_DATA_TYPE_FLOAT_2 1 ++# define R300_DATA_TYPE_FLOAT_3 2 ++# define R300_DATA_TYPE_FLOAT_4 3 ++# define R300_DATA_TYPE_BYTE 4 ++# define R300_DATA_TYPE_D3DCOLOR 5 ++# define R300_DATA_TYPE_SHORT_2 6 ++# define R300_DATA_TYPE_SHORT_4 7 ++# define R300_DATA_TYPE_VECTOR_3_TTT 8 ++# define R300_DATA_TYPE_VECTOR_3_EET 9 ++# define R300_SKIP_DWORDS_0_SHIFT 4 
++# define R300_DST_VEC_LOC_0_SHIFT 8 ++# define R300_LAST_VEC_0 (1 << 13) ++# define R300_SIGNED_0 (1 << 14) ++# define R300_NORMALIZE_0 (1 << 15) ++# define R300_DATA_TYPE_1_SHIFT 16 ++# define R300_SKIP_DWORDS_1_SHIFT 20 ++# define R300_DST_VEC_LOC_1_SHIFT 24 ++# define R300_LAST_VEC_1 (1 << 29) ++# define R300_SIGNED_1 (1 << 30) ++# define R300_NORMALIZE_1 (1 << 31) ++#define R300_VAP_PROG_STREAM_CNTL_1 0x2154 ++# define R300_DATA_TYPE_2_SHIFT 0 ++# define R300_SKIP_DWORDS_2_SHIFT 4 ++# define R300_DST_VEC_LOC_2_SHIFT 8 ++# define R300_LAST_VEC_2 (1 << 13) ++# define R300_SIGNED_2 (1 << 14) ++# define R300_NORMALIZE_2 (1 << 15) ++# define R300_DATA_TYPE_3_SHIFT 16 ++# define R300_SKIP_DWORDS_3_SHIFT 20 ++# define R300_DST_VEC_LOC_3_SHIFT 24 ++# define R300_LAST_VEC_3 (1 << 29) ++# define R300_SIGNED_3 (1 << 30) ++# define R300_NORMALIZE_3 (1 << 31) ++#define R300_VAP_PROG_STREAM_CNTL_EXT_0 0x21e0 ++# define R300_SWIZZLE_SELECT_X_0_SHIFT 0 ++# define R300_SWIZZLE_SELECT_Y_0_SHIFT 3 ++# define R300_SWIZZLE_SELECT_Z_0_SHIFT 6 ++# define R300_SWIZZLE_SELECT_W_0_SHIFT 9 ++# define R300_SWIZZLE_SELECT_X 0 ++# define R300_SWIZZLE_SELECT_Y 1 ++# define R300_SWIZZLE_SELECT_Z 2 ++# define R300_SWIZZLE_SELECT_W 3 ++# define R300_SWIZZLE_SELECT_FP_ZERO 4 ++# define R300_SWIZZLE_SELECT_FP_ONE 5 ++# define R300_WRITE_ENA_0_SHIFT 12 ++# define R300_WRITE_ENA_X 1 ++# define R300_WRITE_ENA_Y 2 ++# define R300_WRITE_ENA_Z 4 ++# define R300_WRITE_ENA_W 8 ++# define R300_SWIZZLE_SELECT_X_1_SHIFT 16 ++# define R300_SWIZZLE_SELECT_Y_1_SHIFT 19 ++# define R300_SWIZZLE_SELECT_Z_1_SHIFT 22 ++# define R300_SWIZZLE_SELECT_W_1_SHIFT 25 ++# define R300_WRITE_ENA_1_SHIFT 28 ++#define R300_VAP_PROG_STREAM_CNTL_EXT_1 0x21e4 ++# define R300_SWIZZLE_SELECT_X_2_SHIFT 0 ++# define R300_SWIZZLE_SELECT_Y_2_SHIFT 3 ++# define R300_SWIZZLE_SELECT_Z_2_SHIFT 6 ++# define R300_SWIZZLE_SELECT_W_2_SHIFT 9 ++# define R300_WRITE_ENA_2_SHIFT 12 ++# define R300_SWIZZLE_SELECT_X_3_SHIFT 16 ++# define 
R300_SWIZZLE_SELECT_Y_3_SHIFT 19 ++# define R300_SWIZZLE_SELECT_Z_3_SHIFT 22 ++# define R300_SWIZZLE_SELECT_W_3_SHIFT 25 ++# define R300_WRITE_ENA_3_SHIFT 28 ++#define R300_VAP_PVS_CODE_CNTL_0 0x22D0 ++# define R300_PVS_FIRST_INST_SHIFT 0 ++# define R300_PVS_XYZW_VALID_INST_SHIFT 10 ++# define R300_PVS_LAST_INST_SHIFT 20 ++#define R300_VAP_PVS_CODE_CNTL_1 0x22D8 ++# define R300_PVS_LAST_VTX_SRC_INST_SHIFT 0 ++#define R300_VAP_PVS_VECTOR_INDX_REG 0x2200 ++#define R300_VAP_PVS_VECTOR_DATA_REG 0x2204 ++/* PVS instructions */ ++/* Opcode and dst instruction */ ++#define R300_PVS_DST_OPCODE(x) (x << 0) ++/* Vector ops */ ++# define R300_VECTOR_NO_OP 0 ++# define R300_VE_DOT_PRODUCT 1 ++# define R300_VE_MULTIPLY 2 ++# define R300_VE_ADD 3 ++# define R300_VE_MULTIPLY_ADD 4 ++# define R300_VE_DISTANCE_VECTOR 5 ++# define R300_VE_FRACTION 6 ++# define R300_VE_MAXIMUM 7 ++# define R300_VE_MINIMUM 8 ++# define R300_VE_SET_GREATER_THAN_EQUAL 9 ++# define R300_VE_SET_LESS_THAN 10 ++# define R300_VE_MULTIPLYX2_ADD 11 ++# define R300_VE_MULTIPLY_CLAMP 12 ++# define R300_VE_FLT2FIX_DX 13 ++# define R300_VE_FLT2FIX_DX_RND 14 ++/* R500 additions */ ++# define R500_VE_PRED_SET_EQ_PUSH 15 ++# define R500_VE_PRED_SET_GT_PUSH 16 ++# define R500_VE_PRED_SET_GTE_PUSH 17 ++# define R500_VE_PRED_SET_NEQ_PUSH 18 ++# define R500_VE_COND_WRITE_EQ 19 ++# define R500_VE_COND_WRITE_GT 20 ++# define R500_VE_COND_WRITE_GTE 21 ++# define R500_VE_COND_WRITE_NEQ 22 ++# define R500_VE_COND_MUX_EQ 23 ++# define R500_VE_COND_MUX_GT 24 ++# define R500_VE_COND_MUX_GTE 25 ++# define R500_VE_SET_GREATER_THAN 26 ++# define R500_VE_SET_EQUAL 27 ++# define R500_VE_SET_NOT_EQUAL 28 ++/* Math ops */ ++# define R300_MATH_NO_OP 0 ++# define R300_ME_EXP_BASE2_DX 1 ++# define R300_ME_LOG_BASE2_DX 2 ++# define R300_ME_EXP_BASEE_FF 3 ++# define R300_ME_LIGHT_COEFF_DX 4 ++# define R300_ME_POWER_FUNC_FF 5 ++# define R300_ME_RECIP_DX 6 ++# define R300_ME_RECIP_FF 7 ++# define R300_ME_RECIP_SQRT_DX 8 ++# define 
R300_ME_RECIP_SQRT_FF 9 ++# define R300_ME_MULTIPLY 10 ++# define R300_ME_EXP_BASE2_FULL_DX 11 ++# define R300_ME_LOG_BASE2_FULL_DX 12 ++# define R300_ME_POWER_FUNC_FF_CLAMP_B 13 ++# define R300_ME_POWER_FUNC_FF_CLAMP_B1 14 ++# define R300_ME_POWER_FUNC_FF_CLAMP_01 15 ++# define R300_ME_SIN 16 ++# define R300_ME_COS 17 ++/* R500 additions */ ++# define R500_ME_LOG_BASE2_IEEE 18 ++# define R500_ME_RECIP_IEEE 19 ++# define R500_ME_RECIP_SQRT_IEEE 20 ++# define R500_ME_PRED_SET_EQ 21 ++# define R500_ME_PRED_SET_GT 22 ++# define R500_ME_PRED_SET_GTE 23 ++# define R500_ME_PRED_SET_NEQ 24 ++# define R500_ME_PRED_SET_CLR 25 ++# define R500_ME_PRED_SET_INV 26 ++# define R500_ME_PRED_SET_POP 27 ++# define R500_ME_PRED_SET_RESTORE 28 ++/* macro */ ++# define R300_PVS_MACRO_OP_2CLK_MADD 0 ++# define R300_PVS_MACRO_OP_2CLK_M2X_ADD 1 ++#define R300_PVS_DST_MATH_INST (1 << 6) ++#define R300_PVS_DST_MACRO_INST (1 << 7) ++#define R300_PVS_DST_REG_TYPE(x) (x << 8) ++# define R300_PVS_DST_REG_TEMPORARY 0 ++# define R300_PVS_DST_REG_A0 1 ++# define R300_PVS_DST_REG_OUT 2 ++# define R500_PVS_DST_REG_OUT_REPL_X 3 ++# define R300_PVS_DST_REG_ALT_TEMPORARY 4 ++# define R300_PVS_DST_REG_INPUT 5 ++#define R300_PVS_DST_ADDR_MODE_1 (1 << 12) ++#define R300_PVS_DST_OFFSET(x) (x << 13) ++#define R300_PVS_DST_WE_X (1 << 20) ++#define R300_PVS_DST_WE_Y (1 << 21) ++#define R300_PVS_DST_WE_Z (1 << 22) ++#define R300_PVS_DST_WE_W (1 << 23) ++#define R300_PVS_DST_VE_SAT (1 << 24) ++#define R300_PVS_DST_ME_SAT (1 << 25) ++#define R300_PVS_DST_PRED_ENABLE (1 << 26) ++#define R300_PVS_DST_PRED_SENSE (1 << 27) ++#define R300_PVS_DST_DUAL_MATH_OP (1 << 28) ++#define R300_PVS_DST_ADDR_SEL(x) (x << 29) ++#define R300_PVS_DST_ADDR_MODE_0 (1 << 31) ++/* src operand instruction */ ++#define R300_PVS_SRC_REG_TYPE(x) (x << 0) ++# define R300_PVS_SRC_REG_TEMPORARY 0 ++# define R300_PVS_SRC_REG_INPUT 1 ++# define R300_PVS_SRC_REG_CONSTANT 2 ++# define R300_PVS_SRC_REG_ALT_TEMPORARY 3 ++#define R300_SPARE_0 (1 << 
2) ++#define R300_PVS_SRC_ABS_XYZW (1 << 3) ++#define R300_PVS_SRC_ADDR_MODE_0 (1 << 4) ++#define R300_PVS_SRC_OFFSET(x) (x << 5) ++#define R300_PVS_SRC_SWIZZLE_X(x) (x << 13) ++#define R300_PVS_SRC_SWIZZLE_Y(x) (x << 16) ++#define R300_PVS_SRC_SWIZZLE_Z(x) (x << 19) ++#define R300_PVS_SRC_SWIZZLE_W(x) (x << 22) ++# define R300_PVS_SRC_SELECT_X 0 ++# define R300_PVS_SRC_SELECT_Y 1 ++# define R300_PVS_SRC_SELECT_Z 2 ++# define R300_PVS_SRC_SELECT_W 3 ++# define R300_PVS_SRC_SELECT_FORCE_0 4 ++# define R300_PVS_SRC_SELECT_FORCE_1 5 ++#define R300_PVS_SRC_NEG_X (1 << 25) ++#define R300_PVS_SRC_NEG_Y (1 << 26) ++#define R300_PVS_SRC_NEG_Z (1 << 27) ++#define R300_PVS_SRC_NEG_W (1 << 28) ++#define R300_PVS_SRC_ADDR_SEL(x) (x << 29) ++#define R300_PVS_SRC_ADDR_MODE_1 (1 << 31) ++ ++#define R300_VAP_PVS_FLOW_CNTL_OPC 0x22DC ++#define R300_VAP_OUT_VTX_FMT_0 0x2090 ++# define R300_VTX_POS_PRESENT (1 << 0) ++# define R300_VTX_COLOR_0_PRESENT (1 << 1) ++# define R300_VTX_COLOR_1_PRESENT (1 << 2) ++# define R300_VTX_COLOR_2_PRESENT (1 << 3) ++# define R300_VTX_COLOR_3_PRESENT (1 << 4) ++# define R300_VTX_PT_SIZE_PRESENT (1 << 16) ++#define R300_VAP_OUT_VTX_FMT_1 0x2094 ++# define R300_TEX_0_COMP_CNT_SHIFT 0 ++# define R300_TEX_1_COMP_CNT_SHIFT 3 ++# define R300_TEX_2_COMP_CNT_SHIFT 6 ++# define R300_TEX_3_COMP_CNT_SHIFT 9 ++# define R300_TEX_4_COMP_CNT_SHIFT 12 ++# define R300_TEX_5_COMP_CNT_SHIFT 15 ++# define R300_TEX_6_COMP_CNT_SHIFT 18 ++# define R300_TEX_7_COMP_CNT_SHIFT 21 ++#define R300_VAP_VTX_SIZE 0x20b4 ++#define R300_VAP_GB_VERT_CLIP_ADJ 0x2220 ++#define R300_VAP_GB_VERT_DISC_ADJ 0x2224 ++#define R300_VAP_GB_HORZ_CLIP_ADJ 0x2228 ++#define R300_VAP_GB_HORZ_DISC_ADJ 0x222c ++#define R300_VAP_CLIP_CNTL 0x221c ++# define R300_UCP_ENA_0 (1 << 0) ++# define R300_UCP_ENA_1 (1 << 1) ++# define R300_UCP_ENA_2 (1 << 2) ++# define R300_UCP_ENA_3 (1 << 3) ++# define R300_UCP_ENA_4 (1 << 4) ++# define R300_UCP_ENA_5 (1 << 5) ++# define R300_PS_UCP_MODE_SHIFT 14 ++# define 
R300_CLIP_DISABLE (1 << 16) ++# define R300_UCP_CULL_ONLY_ENA (1 << 17) ++# define R300_BOUNDARY_EDGE_FLAG_ENA (1 << 18) ++#define R300_VAP_PVS_STATE_FLUSH_REG 0x2284 ++ ++#define R500_VAP_INDEX_OFFSET 0x208c ++ ++#define R300_SU_TEX_WRAP 0x42a0 ++#define R300_SU_POLY_OFFSET_ENABLE 0x42b4 ++#define R300_SU_CULL_MODE 0x42b8 ++# define R300_CULL_FRONT (1 << 0) ++# define R300_CULL_BACK (1 << 1) ++# define R300_FACE_POS (0 << 2) ++# define R300_FACE_NEG (1 << 2) ++#define R300_SU_DEPTH_SCALE 0x42c0 ++#define R300_SU_DEPTH_OFFSET 0x42c4 ++ ++#define R300_RS_COUNT 0x4300 ++# define R300_RS_COUNT_IT_COUNT_SHIFT 0 ++# define R300_RS_COUNT_IC_COUNT_SHIFT 7 ++# define R300_RS_COUNT_HIRES_EN (1 << 18) ++ ++#define R300_RS_IP_0 0x4310 ++#define R300_RS_IP_1 0x4314 ++# define R300_RS_TEX_PTR(x) (x << 0) ++# define R300_RS_COL_PTR(x) (x << 6) ++# define R300_RS_COL_FMT(x) (x << 9) ++# define R300_RS_COL_FMT_RGBA 0 ++# define R300_RS_COL_FMT_RGB0 2 ++# define R300_RS_COL_FMT_RGB1 3 ++# define R300_RS_COL_FMT_000A 4 ++# define R300_RS_COL_FMT_0000 5 ++# define R300_RS_COL_FMT_0001 6 ++# define R300_RS_COL_FMT_111A 8 ++# define R300_RS_COL_FMT_1110 9 ++# define R300_RS_COL_FMT_1111 10 ++# define R300_RS_SEL_S(x) (x << 13) ++# define R300_RS_SEL_T(x) (x << 16) ++# define R300_RS_SEL_R(x) (x << 19) ++# define R300_RS_SEL_Q(x) (x << 22) ++# define R300_RS_SEL_C0 0 ++# define R300_RS_SEL_C1 1 ++# define R300_RS_SEL_C2 2 ++# define R300_RS_SEL_C3 3 ++# define R300_RS_SEL_K0 4 ++# define R300_RS_SEL_K1 5 ++#define R300_RS_INST_COUNT 0x4304 ++# define R300_INST_COUNT_RS(x) (x << 0) ++# define R300_RS_W_EN (1 << 4) ++# define R300_TX_OFFSET_RS(x) (x << 5) ++#define R300_RS_INST_0 0x4330 ++#define R300_RS_INST_1 0x4334 ++# define R300_INST_TEX_ID(x) (x << 0) ++# define R300_RS_INST_TEX_CN_WRITE (1 << 3) ++# define R300_INST_TEX_ADDR(x) (x << 6) ++ ++#define R300_TX_INVALTAGS 0x4100 ++#define R300_TX_FILTER0_0 0x4400 ++# define R300_TX_CLAMP_S(x) (x << 0) ++# define R300_TX_CLAMP_T(x) (x << 
3) ++# define R300_TX_CLAMP_R(x) (x << 6) ++# define R300_TX_CLAMP_WRAP 0 ++# define R300_TX_CLAMP_MIRROR 1 ++# define R300_TX_CLAMP_CLAMP_LAST 2 ++# define R300_TX_CLAMP_MIRROR_CLAMP_LAST 3 ++# define R300_TX_CLAMP_CLAMP_BORDER 4 ++# define R300_TX_CLAMP_MIRROR_CLAMP_BORDER 5 ++# define R300_TX_CLAMP_CLAMP_GL 6 ++# define R300_TX_CLAMP_MIRROR_CLAMP_GL 7 ++# define R300_TX_MAG_FILTER_NEAREST (1 << 9) ++# define R300_TX_MIN_FILTER_NEAREST (1 << 11) ++# define R300_TX_MAG_FILTER_LINEAR (2 << 9) ++# define R300_TX_MIN_FILTER_LINEAR (2 << 11) ++# define R300_TX_ID_SHIFT 28 ++#define R300_TX_FILTER1_0 0x4440 ++#define R300_TX_FORMAT0_0 0x4480 ++# define R300_TXWIDTH_SHIFT 0 ++# define R300_TXHEIGHT_SHIFT 11 ++# define R300_NUM_LEVELS_SHIFT 26 ++# define R300_NUM_LEVELS_MASK 0x ++# define R300_TXPROJECTED (1 << 30) ++# define R300_TXPITCH_EN (1 << 31) ++#define R300_TX_FORMAT1_0 0x44c0 ++# define R300_TX_FORMAT_X8 0x0 ++# define R300_TX_FORMAT_X16 0x1 ++# define R300_TX_FORMAT_Y4X4 0x2 ++# define R300_TX_FORMAT_Y8X8 0x3 ++# define R300_TX_FORMAT_Y16X16 0x4 ++# define R300_TX_FORMAT_Z3Y3X2 0x5 ++# define R300_TX_FORMAT_Z5Y6X5 0x6 ++# define R300_TX_FORMAT_Z6Y5X5 0x7 ++# define R300_TX_FORMAT_Z11Y11X10 0x8 ++# define R300_TX_FORMAT_Z10Y11X11 0x9 ++# define R300_TX_FORMAT_W4Z4Y4X4 0xA ++# define R300_TX_FORMAT_W1Z5Y5X5 0xB ++# define R300_TX_FORMAT_W8Z8Y8X8 0xC ++# define R300_TX_FORMAT_W2Z10Y10X10 0xD ++# define R300_TX_FORMAT_W16Z16Y16X16 0xE ++# define R300_TX_FORMAT_DXT1 0xF ++# define R300_TX_FORMAT_DXT3 0x10 ++# define R300_TX_FORMAT_DXT5 0x11 ++# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */ ++# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */ ++# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */ ++# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */ ++# define R300_TX_FORMAT_VYUY422 0x14 /* no swizzle */ ++# define R300_TX_FORMAT_YVYU422 0x15 /* no swizzle */ ++# define R300_TX_FORMAT_X24_Y8 0x1e ++# define R300_TX_FORMAT_X32 0x1e ++ /* 
Floating point formats */ ++ /* Note - hardware supports both 16 and 32 bit floating point */ ++# define R300_TX_FORMAT_FL_I16 0x18 ++# define R300_TX_FORMAT_FL_I16A16 0x19 ++# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A ++# define R300_TX_FORMAT_FL_I32 0x1B ++# define R300_TX_FORMAT_FL_I32A32 0x1C ++# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D ++ /* alpha modes, convenience mostly */ ++ /* if you have alpha, pick constant appropriate to the ++ number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */ ++# define R300_TX_FORMAT_ALPHA_1CH 0x000 ++# define R300_TX_FORMAT_ALPHA_2CH 0x200 ++# define R300_TX_FORMAT_ALPHA_4CH 0x600 ++# define R300_TX_FORMAT_ALPHA_NONE 0xA00 ++ /* Swizzling */ ++ /* constants */ ++# define R300_TX_FORMAT_X 0 ++# define R300_TX_FORMAT_Y 1 ++# define R300_TX_FORMAT_Z 2 ++# define R300_TX_FORMAT_W 3 ++# define R300_TX_FORMAT_ZERO 4 ++# define R300_TX_FORMAT_ONE 5 ++ /* 2.0*Z, everything above 1.0 is set to 0.0 */ ++# define R300_TX_FORMAT_CUT_Z 6 ++ /* 2.0*W, everything above 1.0 is set to 0.0 */ ++# define R300_TX_FORMAT_CUT_W 7 ++ ++# define R300_TX_FORMAT_B_SHIFT 18 ++# define R300_TX_FORMAT_G_SHIFT 15 ++# define R300_TX_FORMAT_R_SHIFT 12 ++# define R300_TX_FORMAT_A_SHIFT 9 ++ ++ /* Convenience macro to take care of layout and swizzling */ ++# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) ( \ ++ ((R300_TX_FORMAT_##B)<microcode_version != UCODE_R200) { +- DRM_ERROR("Invalid 3d packet for r100-class chip\n"); ++ if ((dev_priv->chip_family < CHIP_R200) || ++ (dev_priv->chip_family > CHIP_RV280)) { ++ DRM_ERROR("Invalid 3d packet for non r200-class chip\n"); + return -EINVAL; + } + break; +@@ -359,8 +360,8 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * + break; + + case RADEON_3D_RNDR_GEN_INDX_PRIM: +- if (dev_priv->microcode_version != UCODE_R100) { +- DRM_ERROR("Invalid 3d packet for r200-class chip\n"); ++ if (dev_priv->chip_family > CHIP_RS200) { ++ DRM_ERROR("Invalid 3d packet for non-r100-class 
chip\n"); + return -EINVAL; + } + if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { +@@ -370,8 +371,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * + break; + + case RADEON_CP_INDX_BUFFER: +- if (dev_priv->microcode_version != UCODE_R200) { +- DRM_ERROR("Invalid 3d packet for r100-class chip\n"); ++ /* safe but r200 only */ ++ if ((dev_priv->chip_family < CHIP_R200) || ++ (dev_priv->chip_family > CHIP_RV280)) { ++ DRM_ERROR("Invalid 3d packet for non-r200-class chip\n"); + return -EINVAL; + } + if ((cmd[1] & 0x8000ffff) != 0x80000810) { +@@ -1018,7 +1021,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + int tileoffset, nrtilesx, nrtilesy, j; + /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */ + if ((dev_priv->flags & RADEON_HAS_HIERZ) +- && !(dev_priv->microcode_version == UCODE_R200)) { ++ && (dev_priv->chip_family < CHIP_R200)) { + /* FIXME : figure this out for r200 (when hierz is enabled). Or + maybe r200 actually doesn't need to put the low-res z value into + the tile cache like r100, but just needs to clear the hi-level z-buffer? +@@ -1047,7 +1050,8 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + ADVANCE_RING(); + tileoffset += depthpixperline >> 6; + } +- } else if (dev_priv->microcode_version == UCODE_R200) { ++ } else if ((dev_priv->chip_family >= CHIP_R200) && ++ (dev_priv->chip_family <= CHIP_RV280)) { + /* works for rv250. 
*/ + /* find first macro tile (8x2 4x4 z-pixels on rv250) */ + tileoffset = +@@ -1102,7 +1106,8 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + + /* TODO don't always clear all hi-level z tiles */ + if ((dev_priv->flags & RADEON_HAS_HIERZ) +- && (dev_priv->microcode_version == UCODE_R200) ++ && ((dev_priv->chip_family >= CHIP_R200) && ++ (dev_priv->chip_family <= CHIP_RV280)) + && (flags & RADEON_USE_HIERZ)) + /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */ + /* FIXME : the mask supposedly contains low-res z values. So can't set +@@ -1122,8 +1127,9 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + * rendering a quad into just those buffers. Thus, we have to + * make sure the 3D engine is configured correctly. + */ +- else if ((dev_priv->microcode_version == UCODE_R200) && +- (flags & (RADEON_DEPTH | RADEON_STENCIL))) { ++ else if ((dev_priv->chip_family >= CHIP_R200) && ++ (dev_priv->chip_family <= CHIP_RV280) && ++ (flags & (RADEON_DEPTH | RADEON_STENCIL))) { + + int tempPP_CNTL; + int tempRE_CNTL; +@@ -1878,10 +1884,11 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, + OUT_RING((image->width << 16) | height); + RADEON_WAIT_UNTIL_2D_IDLE(); + ADVANCE_RING(); +- COMMIT_RING(); + + radeon_cp_discard_buffer(dev, file_priv->master, buf); + ++ COMMIT_RING(); ++ + /* Update the input parameters for next time */ + image->y += height; + image->height -= height; +@@ -2207,6 +2214,9 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + ++ if (dev_priv->mm.vram_offset) ++ radeon_gem_update_offsets(dev, file_priv->master); ++ + radeon_cp_dispatch_swap(dev, file_priv->master); + sarea_priv->ctx_owner = 0; + +@@ -2872,7 +2882,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file + + orig_nbox = cmdbuf->nbox; + +- if 
(dev_priv->microcode_version == UCODE_R300) { ++ if (dev_priv->chip_family >= CHIP_R300) { + int temp; + temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); + +@@ -3071,6 +3081,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil + case RADEON_PARAM_NUM_GB_PIPES: + value = dev_priv->num_gb_pipes; + break; ++ case RADEON_PARAM_KERNEL_MM: ++ value = dev_priv->mm_enabled; ++ break; + default: + DRM_DEBUG("Invalid parameter %d\n", param->param); + return -EINVAL; +@@ -3093,11 +3106,17 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil + + switch (sp->param) { + case RADEON_SETPARAM_FB_LOCATION: ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + radeon_priv = file_priv->driver_priv; + radeon_priv->radeon_fb_delta = dev_priv->fb_location - + sp->value; + break; + case RADEON_SETPARAM_SWITCH_TILING: ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + if (sp->value == 0) { + DRM_DEBUG("color tiling disabled\n"); + dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; +@@ -3113,13 +3132,21 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil + } + break; + case RADEON_SETPARAM_PCIGART_LOCATION: ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + dev_priv->pcigart_offset = sp->value; + dev_priv->pcigart_offset_set = 1; + break; + case RADEON_SETPARAM_NEW_MEMMAP: ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; + dev_priv->new_memmap = sp->value; + break; + case RADEON_SETPARAM_PCIGART_TABLE_SIZE: ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + dev_priv->gart_info.table_size = sp->value; + if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE) + dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; +@@ -3127,6 +3154,14 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil + case RADEON_SETPARAM_VBLANK_CRTC: + return radeon_vblank_crtc_set(dev, 
sp->value); + break; ++ case RADEON_SETPARAM_MM_INIT: ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ ++ dev_priv->new_memmap = true; ++ dev_priv->user_mm_enable = true; ++ return radeon_gem_mm_init(dev); ++ break; + default: + DRM_DEBUG("Invalid parameter %d\n", sp->param); + return -EINVAL; +@@ -3215,7 +3250,19 @@ struct drm_ioctl_desc radeon_ioctls[] = { + DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH) ++ DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_GEM_PIN, radeon_gem_pin_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_GEM_UNPIN, radeon_gem_unpin_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_RENDERING, radeon_gem_wait_rendering, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), + }; + + int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); +diff --git a/include/drm/drm.h b/include/drm/drm.h +index 32e5096..46389d5 100644 +--- a/include/drm/drm.h ++++ b/include/drm/drm.h +@@ -174,6 +174,7 @@ enum drm_map_type { + _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ + _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ + _DRM_GEM = 6, /**< GEM object */ ++ _DRM_TTM = 7, /**< TTM type */ + }; + + /** +diff --git a/include/drm/drmP.h b/include/drm/drmP.h +index 
7802c80..51e7943 100644 +--- a/include/drm/drmP.h ++++ b/include/drm/drmP.h +@@ -147,9 +147,23 @@ struct drm_device; + #define DRM_MEM_CTXLIST 21 + #define DRM_MEM_MM 22 + #define DRM_MEM_HASHTAB 23 ++#define DRM_MEM_OBJECTS 24 ++#define DRM_MEM_FENCE 25 ++#define DRM_MEM_TTM 26 ++#define DRM_MEM_BUFOBJ 27 + + #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) + #define DRM_MAP_HASH_OFFSET 0x10000000 ++#define DRM_MAP_HASH_ORDER 12 ++#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) ++#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) ++/* ++ * This should be small enough to allow the use of kmalloc for hash tables ++ * instead of vmalloc. ++ */ ++ ++#define DRM_FILE_HASH_ORDER 8 ++#define DRM_MM_INIT_MAX_PAGES 256 + + /*@}*/ + +@@ -659,6 +673,8 @@ struct drm_master { + void *driver_priv; /**< Private structure for driver to use */ + }; + ++#include "drm_objects.h" ++ + /** + * DRM driver structure. This structure represent the common code for + * a family of cards. 
There will one drm_device for each card present +@@ -777,6 +793,8 @@ struct drm_driver { + + /* Driver private ops for this object */ + struct vm_operations_struct *gem_vm_ops; ++ struct drm_fence_driver *fence_driver; ++ struct drm_bo_driver *bo_driver; + + int major; + int minor; +@@ -852,7 +870,10 @@ struct drm_device { + /*@{ */ + struct list_head maplist; /**< Linked list of regions */ + int map_count; /**< Number of mappable regions */ +- struct drm_open_hash map_hash; /**< User token hash table for maps */ ++ struct drm_open_hash map_hash; /**< User token hash table for maps */ ++ struct drm_mm offset_manager; /**< User token manager */ ++ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ ++ struct page *ttm_dummy_page; + + /** \name Context handle management */ + /*@{ */ +@@ -863,6 +884,7 @@ struct drm_device { + struct idr ctx_idr; + + struct list_head vmalist; /**< List of vmas (for debugging) */ ++ struct drm_hw_lock default_lock; + + /*@} */ + +@@ -935,7 +957,6 @@ struct drm_device { + int num_crtcs; /**< Number of CRTCs on this device */ + void *dev_private; /**< device private data */ + void *mm_private; +- struct address_space *dev_mapping; + struct drm_sigdata sigdata; /**< For block_all_signals */ + sigset_t sigmask; + +@@ -945,6 +966,9 @@ struct drm_device { + struct drm_minor *control; /**< Control node for card */ + struct drm_minor *primary; /**< render type primary screen head */ + ++ struct drm_fence_manager fm; ++ struct drm_buffer_manager bm; ++ + /** \name Drawable information */ + /*@{ */ + spinlock_t drw_lock; +@@ -967,8 +991,28 @@ struct drm_device { + uint32_t invalidate_domains; /* domains pending invalidation */ + uint32_t flush_domains; /* domains pending flush */ + /*@} */ ++}; + ++#if __OS_HAS_AGP ++struct drm_agp_ttm_backend { ++ struct drm_ttm_backend backend; ++ DRM_AGP_MEM *mem; ++ struct agp_bridge_data *bridge; ++ int populated; ++}; ++#endif ++struct ati_pcigart_ttm_backend { ++ struct drm_ttm_backend 
backend; ++ int populated; ++ void (*gart_flush_fn)(struct drm_device *dev); ++ struct drm_ati_pcigart_info *gart_info; ++ unsigned long offset; ++ struct page **pages; ++ int num_pages; ++ int bound; ++ struct drm_device *dev; + }; ++extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev)); + + static inline int drm_dev_to_irq(struct drm_device *dev) + { +@@ -1078,6 +1122,17 @@ extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, + uint32_t type); + extern int drm_unbind_agp(DRM_AGP_MEM * handle); + ++extern void drm_free_memctl(size_t size); ++extern int drm_alloc_memctl(size_t size); ++extern void drm_query_memctl(uint64_t *cur_used, ++ uint64_t *emer_used, ++ uint64_t *low_threshold, ++ uint64_t *high_threshold, ++ uint64_t *emer_threshold); ++extern void drm_init_memctl(size_t low_threshold, ++ size_t high_threshold, ++ size_t unit_size); ++ + /* Misc. IOCTL support (drm_ioctl.h) */ + extern int drm_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv); +@@ -1248,6 +1303,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size + extern int drm_agp_free_memory(DRM_AGP_MEM * handle); + extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); + extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); ++extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev); + extern void drm_agp_chipset_flush(struct drm_device *dev); + + /* Stub support (drm_stub.h) */ +@@ -1289,6 +1345,8 @@ extern int drm_ati_pcigart_init(struct drm_device *dev, + struct drm_ati_pcigart_info * gart_info); + extern int drm_ati_pcigart_cleanup(struct drm_device *dev, + struct drm_ati_pcigart_info * gart_info); ++extern int drm_ati_alloc_pcigart_table(struct drm_device *dev, ++ struct drm_ati_pcigart_info *gart_info); + + extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, + size_t 
align, dma_addr_t maxaddr); +@@ -1444,6 +1502,39 @@ extern void drm_free(void *pt, size_t size, int area); + extern void *drm_calloc(size_t nmemb, size_t size, int area); + #endif + ++/* ++ * Accounting variants of standard calls. ++ */ ++ ++static inline void *drm_ctl_alloc(size_t size, int area) ++{ ++ void *ret; ++ if (drm_alloc_memctl(size)) ++ return NULL; ++ ret = drm_alloc(size, area); ++ if (!ret) ++ drm_free_memctl(size); ++ return ret; ++} ++ ++static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area) ++{ ++ void *ret; ++ ++ if (drm_alloc_memctl(nmemb*size)) ++ return NULL; ++ ret = drm_calloc(nmemb, size, area); ++ if (!ret) ++ drm_free_memctl(nmemb*size); ++ return ret; ++} ++ ++static inline void drm_ctl_free(void *pt, size_t size, int area) ++{ ++ drm_free(pt, size, area); ++ drm_free_memctl(size); ++} ++ + /*@}*/ + + #endif /* __KERNEL__ */ +diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h +index a341828..0af3487 100644 +--- a/include/drm/drm_crtc_helper.h ++++ b/include/drm/drm_crtc_helper.h +@@ -118,4 +118,6 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, + } + + extern int drm_helper_resume_force_mode(struct drm_device *dev); ++extern void drm_helper_set_connector_dpms(struct drm_connector *connector, ++ int dpms_mode); + #endif +diff --git a/include/drm/drm_objects.h b/include/drm/drm_objects.h +new file mode 100644 +index 0000000..604c8f8 +--- /dev/null ++++ b/include/drm/drm_objects.h +@@ -0,0 +1,913 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#ifndef _DRM_OBJECTS_H ++#define _DRM_OBJECTS_H ++ ++struct drm_device; ++struct drm_bo_mem_reg; ++ ++#define DRM_FENCE_FLAG_EMIT 0x00000001 ++#define DRM_FENCE_FLAG_SHAREABLE 0x00000002 ++/** ++ * On hardware with no interrupt events for operation completion, ++ * indicates that the kernel should sleep while waiting for any blocking ++ * operation to complete rather than spinning. ++ * ++ * Has no effect otherwise. 
++ */ ++#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 ++#define DRM_FENCE_FLAG_NO_USER 0x00000010 ++ ++/* Reserved for driver use */ ++#define DRM_FENCE_MASK_DRIVER 0xFF000000 ++ ++#define DRM_FENCE_TYPE_EXE 0x00000001 ++ ++struct drm_fence_arg { ++ unsigned int handle; ++ unsigned int fence_class; ++ unsigned int type; ++ unsigned int flags; ++ unsigned int signaled; ++ unsigned int error; ++ unsigned int sequence; ++ unsigned int pad64; ++ uint64_t expand_pad[2]; /*Future expansion */ ++}; ++ ++/* Buffer permissions, referring to how the GPU uses the buffers. ++ * these translate to fence types used for the buffers. ++ * Typically a texture buffer is read, A destination buffer is write and ++ * a command (batch-) buffer is exe. Can be or-ed together. ++ */ ++ ++#define DRM_BO_FLAG_READ (1ULL << 0) ++#define DRM_BO_FLAG_WRITE (1ULL << 1) ++#define DRM_BO_FLAG_EXE (1ULL << 2) ++ ++/* ++ * All of the bits related to access mode ++ */ ++#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE) ++/* ++ * Status flags. Can be read to determine the actual state of a buffer. ++ * Can also be set in the buffer mask before validation. ++ */ ++ ++/* ++ * Mask: Never evict this buffer. Not even with force. This type of buffer is only ++ * available to root and must be manually removed before buffer manager shutdown ++ * or lock. ++ * Flags: Acknowledge ++ */ ++#define DRM_BO_FLAG_NO_EVICT (1ULL << 4) ++ ++/* ++ * Mask: Require that the buffer is placed in mappable memory when validated. ++ * If not set the buffer may or may not be in mappable memory when validated. ++ * Flags: If set, the buffer is in mappable memory. ++ */ ++#define DRM_BO_FLAG_MAPPABLE (1ULL << 5) ++ ++/* Mask: The buffer should be shareable with other processes. ++ * Flags: The buffer is shareable with other processes. ++ */ ++#define DRM_BO_FLAG_SHAREABLE (1ULL << 6) ++ ++/* Mask: If set, place the buffer in cache-coherent memory if available. 
++ * If clear, never place the buffer in cache coherent memory if validated. ++ * Flags: The buffer is currently in cache-coherent memory. ++ */ ++#define DRM_BO_FLAG_CACHED (1ULL << 7) ++ ++/* Mask: Make sure that every time this buffer is validated, ++ * it ends up on the same location provided that the memory mask is the same. ++ * The buffer will also not be evicted when claiming space for ++ * other buffers. Basically a pinned buffer but it may be thrown out as ++ * part of buffer manager shutdown or locking. ++ * Flags: Acknowledge. ++ */ ++#define DRM_BO_FLAG_NO_MOVE (1ULL << 8) ++ ++/* ++ * Mask: if set the note the buffer contents are discardable ++ * Flags: if set the buffer contents are discardable on migration ++ */ ++#define DRM_BO_FLAG_DISCARDABLE (1ULL << 9) ++ ++/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction ++ * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART ++ * with unsnooped PTEs instead of snooped, by using chipset-specific cache ++ * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED, ++ * as the eviction to local memory (TTM unbind) on map is just a side effect ++ * to prevent aggressive cache prefetch from the GPU disturbing the cache ++ * management that the DRM is doing. ++ * ++ * Flags: Acknowledge. ++ * Buffers allocated with this flag should not be used for suballocators ++ * This type may have issues on CPUs with over-aggressive caching ++ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2 ++ */ ++#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19) ++ ++ ++/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. ++ * Flags: Acknowledge. ++ */ ++#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13) ++ ++/* ++ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. ++ * Flags: Acknowledge. 
++ */ ++#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14) ++#define DRM_BO_FLAG_TILE (1ULL << 15) ++ ++/* ++ * Buffer has been mapped or touched since creation ++ * for VRAM we don't need to migrate, just fill with 0s for non-dirty ++ */ ++#define DRM_BO_FLAG_CLEAN (1ULL << 16) ++ ++/* ++ * Memory type flags that can be or'ed together in the mask, but only ++ * one appears in flags. ++ */ ++ ++/* System memory */ ++#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24) ++/* Translation table memory */ ++#define DRM_BO_FLAG_MEM_TT (1ULL << 25) ++/* Vram memory */ ++#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26) ++/* Up to the driver to define. */ ++#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27) ++#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28) ++#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29) ++#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30) ++#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31) ++/* We can add more of these now with a 64-bit flag type */ ++ ++/* ++ * This is a mask covering all of the memory type flags; easier to just ++ * use a single constant than a bunch of | values. It covers ++ * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4 ++ */ ++#define DRM_BO_MASK_MEM 0x00000000FF000000ULL ++/* ++ * This adds all of the CPU-mapping options in with the memory ++ * type to label all bits which change how the page gets mapped ++ */ ++#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \ ++ DRM_BO_FLAG_CACHED_MAPPED | \ ++ DRM_BO_FLAG_CACHED | \ ++ DRM_BO_FLAG_MAPPABLE) ++ ++/* Driver-private flags */ ++#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL ++ ++/* ++ * Don't block on validate and map. Instead, return EBUSY. ++ */ ++#define DRM_BO_HINT_DONT_BLOCK 0x00000002 ++/* ++ * Don't place this buffer on the unfenced list. 
This means ++ * that the buffer will not end up having a fence associated ++ * with it as a result of this operation ++ */ ++#define DRM_BO_HINT_DONT_FENCE 0x00000004 ++/** ++ * On hardware with no interrupt events for operation completion, ++ * indicates that the kernel should sleep while waiting for any blocking ++ * operation to complete rather than spinning. ++ * ++ * Has no effect otherwise. ++ */ ++#define DRM_BO_HINT_WAIT_LAZY 0x00000008 ++/* ++ * The client has compute relocations refering to this buffer using the ++ * offset in the presumed_offset field. If that offset ends up matching ++ * where this buffer lands, the kernel is free to skip executing those ++ * relocations ++ */ ++#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010 ++ ++#define DRM_BO_MEM_LOCAL 0 ++#define DRM_BO_MEM_TT 1 ++#define DRM_BO_MEM_VRAM 2 ++#define DRM_BO_MEM_PRIV0 3 ++#define DRM_BO_MEM_PRIV1 4 ++#define DRM_BO_MEM_PRIV2 5 ++#define DRM_BO_MEM_PRIV3 6 ++#define DRM_BO_MEM_PRIV4 7 ++ ++#define DRM_BO_MEM_TYPES 8 /* For now. */ ++ ++#define DRM_BO_LOCK_UNLOCK_BM (1 << 0) ++#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1) ++ ++ ++/*************************************************** ++ * Fence objects. (drm_fence.c) ++ */ ++ ++struct drm_fence_object { ++ struct drm_device *dev; ++ atomic_t usage; ++ ++ /* ++ * The below three fields are protected by the fence manager spinlock. 
++ */ ++ ++ struct list_head ring; ++ int fence_class; ++ uint32_t native_types; ++ uint32_t type; ++ uint32_t signaled_types; ++ uint32_t sequence; ++ uint32_t waiting_types; ++ uint32_t error; ++}; ++ ++#define _DRM_FENCE_CLASSES 8 ++ ++struct drm_fence_class_manager { ++ struct list_head ring; ++ uint32_t pending_flush; ++ uint32_t waiting_types; ++ wait_queue_head_t fence_queue; ++ uint32_t highest_waiting_sequence; ++ uint32_t latest_queued_sequence; ++}; ++ ++struct drm_fence_manager { ++ int initialized; ++ rwlock_t lock; ++ struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES]; ++ uint32_t num_classes; ++ atomic_t count; ++}; ++ ++struct drm_fence_driver { ++ unsigned long *waiting_jiffies; ++ uint32_t num_classes; ++ uint32_t wrap_diff; ++ uint32_t flush_diff; ++ uint32_t sequence_mask; ++ ++ /* ++ * Driver implemented functions: ++ * has_irq() : 1 if the hardware can update the indicated type_flags using an ++ * irq handler. 0 if polling is required. ++ * ++ * emit() : Emit a sequence number to the command stream. ++ * Return the sequence number. ++ * ++ * flush() : Make sure the flags indicated in fc->pending_flush will eventually ++ * signal for fc->highest_received_sequence and all preceding sequences. ++ * Acknowledge by clearing the flags fc->pending_flush. ++ * ++ * poll() : Call drm_fence_handler with any new information. ++ * ++ * needed_flush() : Given the current state of the fence->type flags and previusly ++ * executed or queued flushes, return the type_flags that need flushing. ++ * ++ * wait(): Wait for the "mask" flags to signal on a given fence, performing ++ * whatever's necessary to make this happen. 
++ */ ++ ++ int (*has_irq) (struct drm_device *dev, uint32_t fence_class, ++ uint32_t flags); ++ int (*emit) (struct drm_device *dev, uint32_t fence_class, ++ uint32_t flags, uint32_t *breadcrumb, ++ uint32_t *native_type); ++ void (*flush) (struct drm_device *dev, uint32_t fence_class); ++ void (*poll) (struct drm_device *dev, uint32_t fence_class, ++ uint32_t types); ++ uint32_t (*needed_flush) (struct drm_fence_object *fence); ++ int (*wait) (struct drm_fence_object *fence, int lazy, ++ int interruptible, uint32_t mask); ++}; ++ ++extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy, ++ int interruptible, uint32_t mask, ++ unsigned long end_jiffies); ++extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, ++ uint32_t sequence, uint32_t type, ++ uint32_t error); ++extern void drm_fence_manager_init(struct drm_device *dev); ++extern void drm_fence_manager_takedown(struct drm_device *dev); ++extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, ++ uint32_t sequence); ++extern int drm_fence_object_flush(struct drm_fence_object *fence, ++ uint32_t type); ++extern int drm_fence_object_signaled(struct drm_fence_object *fence, ++ uint32_t type); ++extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence); ++extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence); ++extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); ++extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, ++ struct drm_fence_object *src); ++extern int drm_fence_object_wait(struct drm_fence_object *fence, ++ int lazy, int ignore_signals, uint32_t mask); ++extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, ++ uint32_t fence_flags, uint32_t fence_class, ++ struct drm_fence_object **c_fence); ++extern int drm_fence_object_emit(struct drm_fence_object *fence, ++ uint32_t fence_flags, uint32_t class, ++ uint32_t type); ++extern 
void drm_fence_fill_arg(struct drm_fence_object *fence, ++ struct drm_fence_arg *arg); ++ ++extern int drm_fence_add_user_object(struct drm_file *priv, ++ struct drm_fence_object *fence, ++ int shareable); ++ ++extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++/************************************************** ++ *TTMs ++ */ ++ ++/* ++ * The ttm backend GTT interface. (In our case AGP). ++ * Any similar type of device (PCIE?) ++ * needs only to implement these functions to be usable with the TTM interface. ++ * The AGP backend implementation lives in drm_agpsupport.c ++ * basically maps these calls to available functions in agpgart. ++ * Each drm device driver gets an ++ * additional function pointer that creates these types, ++ * so that the device can choose the correct aperture. ++ * (Multiple AGP apertures, etc.) ++ * Most device drivers will let this point to the standard AGP implementation. 
++ */ ++ ++#define DRM_BE_FLAG_NEEDS_FREE 0x00000001 ++#define DRM_BE_FLAG_BOUND_CACHED 0x00000002 ++ ++struct drm_ttm_backend; ++struct drm_ttm_backend_func { ++ int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend); ++ int (*populate) (struct drm_ttm_backend *backend, ++ unsigned long num_pages, struct page **pages, ++ struct page *dummy_read_page); ++ void (*clear) (struct drm_ttm_backend *backend); ++ int (*bind) (struct drm_ttm_backend *backend, ++ struct drm_bo_mem_reg *bo_mem); ++ int (*unbind) (struct drm_ttm_backend *backend); ++ void (*destroy) (struct drm_ttm_backend *backend); ++}; ++ ++/** ++ * This structure associates a set of flags and methods with a drm_ttm ++ * object, and will also be subclassed by the particular backend. ++ * ++ * \sa #drm_agp_ttm_backend ++ */ ++struct drm_ttm_backend { ++ struct drm_device *dev; ++ uint32_t flags; ++ struct drm_ttm_backend_func *func; ++}; ++ ++struct drm_ttm { ++ struct page *dummy_read_page; ++ struct page **pages; ++ long first_himem_page; ++ long last_lomem_page; ++ uint32_t page_flags; ++ unsigned long num_pages; ++ atomic_t vma_count; ++ struct drm_device *dev; ++ int destroy; ++ uint32_t mapping_offset; ++ struct drm_ttm_backend *be; ++ unsigned long highest_lomem_entry; ++ unsigned long lowest_himem_entry; ++ enum { ++ ttm_bound, ++ ttm_evicted, ++ ttm_unbound, ++ ttm_unpopulated, ++ } state; ++ ++}; ++ ++extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size, ++ uint32_t page_flags, ++ struct page *dummy_read_page); ++extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem); ++extern void drm_ttm_unbind(struct drm_ttm *ttm); ++extern void drm_ttm_evict(struct drm_ttm *ttm); ++extern void drm_ttm_fixup_caching(struct drm_ttm *ttm); ++extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index); ++extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages); ++extern int drm_ttm_populate(struct drm_ttm *ttm); ++extern int 
drm_ttm_set_user(struct drm_ttm *ttm, ++ struct task_struct *tsk, ++ unsigned long start, ++ unsigned long num_pages); ++ ++/* ++ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do ++ * this which calls this function iff there are no vmas referencing it anymore. ++ * Otherwise it is called when the last vma exits. ++ */ ++ ++extern int drm_ttm_destroy(struct drm_ttm *ttm); ++ ++#define DRM_FLAG_MASKED(_old, _new, _mask) {\ ++(_old) ^= (((_old) ^ (_new)) & (_mask)); \ ++} ++ ++#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1) ++#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS) ++ ++/* ++ * Page flags. ++ */ ++ ++/* ++ * This ttm should not be cached by the CPU ++ */ ++#define DRM_TTM_PAGE_UNCACHED (1 << 0) ++/* ++ * This flat is not used at this time; I don't know what the ++ * intent was ++ */ ++#define DRM_TTM_PAGE_USED (1 << 1) ++/* ++ * This flat is not used at this time; I don't know what the ++ * intent was ++ */ ++#define DRM_TTM_PAGE_BOUND (1 << 2) ++/* ++ * This flat is not used at this time; I don't know what the ++ * intent was ++ */ ++#define DRM_TTM_PAGE_PRESENT (1 << 3) ++/* ++ * The array of page pointers was allocated with vmalloc ++ * instead of drm_calloc. ++ */ ++#define DRM_TTM_PAGEDIR_VMALLOC (1 << 4) ++/* ++ * This ttm is mapped from user space ++ */ ++#define DRM_TTM_PAGE_USER (1 << 5) ++/* ++ * This ttm will be written to by the GPU ++ */ ++#define DRM_TTM_PAGE_WRITE (1 << 6) ++/* ++ * This ttm was mapped to the GPU, and so the contents may have ++ * been modified ++ */ ++#define DRM_TTM_PAGE_USER_DIRTY (1 << 7) ++/* ++ * This flag is not used at this time; I don't know what the ++ * intent was. ++ */ ++#define DRM_TTM_PAGE_USER_DMA (1 << 8) ++ ++/*************************************************** ++ * Buffer objects. 
(drm_bo.c, drm_bo_move.c) ++ */ ++ ++struct drm_bo_mem_reg { ++ struct drm_mm_node *mm_node; ++ unsigned long size; ++ unsigned long num_pages; ++ uint32_t page_alignment; ++ uint32_t mem_type; ++ /* ++ * Current buffer status flags, indicating ++ * where the buffer is located and which ++ * access modes are in effect ++ */ ++ uint64_t flags; ++ /** ++ * These are the flags proposed for ++ * a validate operation. If the ++ * validate succeeds, they'll get moved ++ * into the flags field ++ */ ++ uint64_t proposed_flags; ++ ++ uint32_t desired_tile_stride; ++ uint32_t hw_tile_stride; ++}; ++ ++enum drm_bo_type { ++ /* ++ * drm_bo_type_device are 'normal' drm allocations, ++ * pages are allocated from within the kernel automatically ++ * and the objects can be mmap'd from the drm device. Each ++ * drm_bo_type_device object has a unique name which can be ++ * used by other processes to share access to the underlying ++ * buffer. ++ */ ++ drm_bo_type_device, ++ /* ++ * drm_bo_type_user are buffers of pages that already exist ++ * in the process address space. They are more limited than ++ * drm_bo_type_device buffers in that they must always ++ * remain cached (as we assume the user pages are mapped cached), ++ * and they are not sharable to other processes through DRM ++ * (although, regular shared memory should still work fine). ++ */ ++ drm_bo_type_user, ++ /* ++ * drm_bo_type_kernel are buffers that exist solely for use ++ * within the kernel. The pages cannot be mapped into the ++ * process. One obvious use would be for the ring ++ * buffer where user access would not (ideally) be required. ++ */ ++ drm_bo_type_kernel, ++}; ++ ++struct drm_buffer_object { ++ struct drm_device *dev; ++ ++ /* ++ * If there is a possibility that the usage variable is zero, ++ * then dev->struct_mutext should be locked before incrementing it. 
++ */ ++ ++ atomic_t usage; ++ unsigned long buffer_start; ++ enum drm_bo_type type; ++ unsigned long offset; ++ atomic_t mapped; ++ struct drm_bo_mem_reg mem; ++ ++ struct list_head lru; ++ struct list_head ddestroy; ++ ++ uint32_t fence_type; ++ uint32_t fence_class; ++ uint32_t new_fence_type; ++ uint32_t new_fence_class; ++ struct drm_fence_object *fence; ++ uint32_t priv_flags; ++ wait_queue_head_t event_queue; ++ struct mutex mutex; ++ unsigned long num_pages; ++ ++ /* For pinned buffers */ ++ struct drm_mm_node *pinned_node; ++ uint32_t pinned_mem_type; ++ struct list_head pinned_lru; ++ ++ /* For vm */ ++ struct drm_ttm *ttm; ++ struct drm_map_list map_list; ++ uint32_t memory_type; ++ unsigned long bus_offset; ++ uint32_t vm_flags; ++ void *iomap; ++ ++#ifdef DRM_ODD_MM_COMPAT ++ /* dev->struct_mutex only protected. */ ++ struct list_head vma_list; ++ struct list_head p_mm_list; ++#endif ++ ++}; ++ ++#define _DRM_BO_FLAG_UNFENCED 0x00000001 ++#define _DRM_BO_FLAG_EVICTED 0x00000002 ++ ++/* ++ * This flag indicates that a flag called with bo->mutex held has ++ * temporarily released the buffer object mutex, (usually to wait for something). ++ * and thus any post-lock validation needs to be rerun. 
++ */ ++ ++#define _DRM_BO_FLAG_UNLOCKED 0x00000004 ++ ++struct drm_mem_type_manager { ++ int has_type; ++ int use_type; ++ int kern_init_type; ++ struct drm_mm manager; ++ struct list_head lru; ++ struct list_head pinned; ++ uint32_t flags; ++ uint32_t drm_bus_maptype; ++ unsigned long gpu_offset; ++ unsigned long io_offset; ++ unsigned long io_size; ++ void *io_addr; ++ uint64_t size; /* size of managed area for reporting to userspace */ ++}; ++ ++struct drm_bo_lock { ++ // struct drm_user_object base; ++ wait_queue_head_t queue; ++ atomic_t write_lock_pending; ++ atomic_t readers; ++}; ++ ++#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ ++#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ ++#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ ++#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap ++ before kernel access. */ ++#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ ++#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ ++ ++#define _DRM_BM_ALLOCATOR_CACHED 0x0 ++#define _DRM_BM_ALLOCATOR_UNCACHED 0x1 ++ ++struct drm_buffer_manager { ++ struct drm_bo_lock bm_lock; ++ struct mutex evict_mutex; ++ int nice_mode; ++ int initialized; ++ struct drm_file *last_to_validate; ++ struct drm_mem_type_manager man[DRM_BO_MEM_TYPES]; ++ struct list_head unfenced; ++ struct list_head ddestroy; ++ struct delayed_work wq; ++ uint32_t fence_type; ++ unsigned long cur_pages; ++ atomic_t count; ++ struct page *dummy_read_page; ++ int allocator_type; ++}; ++ ++struct drm_bo_driver { ++ const uint32_t *mem_type_prio; ++ const uint32_t *mem_busy_prio; ++ uint32_t num_mem_type_prio; ++ uint32_t num_mem_busy_prio; ++ struct drm_ttm_backend *(*create_ttm_backend_entry) ++ (struct drm_device *dev); ++ int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass, ++ uint32_t *type); ++ int (*invalidate_caches) (struct drm_device *dev, uint64_t flags); ++ int 
(*init_mem_type) (struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man); ++ /* ++ * evict_flags: ++ * ++ * @bo: the buffer object to be evicted ++ * ++ * Return the bo flags for a buffer which is not mapped to the hardware. ++ * These will be placed in proposed_flags so that when the move is ++ * finished, they'll end up in bo->mem.flags ++ */ ++ uint64_t(*evict_flags) (struct drm_buffer_object *bo); ++ /* ++ * move: ++ * ++ * @bo: the buffer to move ++ * ++ * @evict: whether this motion is evicting the buffer from ++ * the graphics address space ++ * ++ * @no_wait: whether this should give up and return -EBUSY ++ * if this move would require sleeping ++ * ++ * @new_mem: the new memory region receiving the buffer ++ * ++ * Move a buffer between two memory regions. ++ */ ++ int (*move) (struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem); ++ /* ++ * ttm_cache_flush ++ */ ++ void (*ttm_cache_flush)(struct drm_ttm *ttm); ++ ++ /* ++ * command_stream_barrier ++ * ++ * @dev: The drm device. ++ * ++ * @bo: The buffer object to validate. ++ * ++ * @new_fence_class: The new fence class for the buffer object. ++ * ++ * @new_fence_type: The new fence type for the buffer object. ++ * ++ * @no_wait: whether this should give up and return -EBUSY ++ * if this operation would require sleeping ++ * ++ * Insert a command stream barrier that makes sure that the ++ * buffer is idle once the commands associated with the ++ * current validation are starting to execute. If an error ++ * condition is returned, or the function pointer is NULL, ++ * the drm core will force buffer idle ++ * during validation. 
++ */ ++ ++ int (*command_stream_barrier) (struct drm_buffer_object *bo, ++ uint32_t new_fence_class, ++ uint32_t new_fence_type, ++ int no_wait); ++}; ++ ++/* ++ * buffer objects (drm_bo.c) ++ */ ++int drm_bo_do_validate(struct drm_buffer_object *bo, ++ uint64_t flags, uint64_t mask, uint32_t hint, ++ uint32_t fence_class); ++extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin); ++extern int drm_bo_driver_finish(struct drm_device *dev); ++extern int drm_bo_driver_init(struct drm_device *dev); ++extern int drm_bo_pci_offset(struct drm_device *dev, ++ struct drm_bo_mem_reg *mem, ++ unsigned long *bus_base, ++ unsigned long *bus_offset, ++ unsigned long *bus_size); ++extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem); ++ ++extern int drm_bo_add_user_object(struct drm_file *file_priv, ++ struct drm_buffer_object *bo, int shareable); ++extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo); ++extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); ++extern void drm_putback_buffer_objects(struct drm_device *dev); ++extern int drm_fence_buffer_objects(struct drm_device *dev, ++ struct list_head *list, ++ uint32_t fence_flags, ++ struct drm_fence_object *fence, ++ struct drm_fence_object **used_fence); ++extern void drm_bo_add_to_lru(struct drm_buffer_object *bo); ++extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, ++ enum drm_bo_type type, uint64_t flags, ++ uint32_t hint, uint32_t page_alignment, ++ unsigned long buffer_start, ++ struct drm_buffer_object **bo); ++extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible, ++ int no_wait, int check_unfenced); ++extern int drm_bo_mem_space(struct drm_buffer_object *bo, ++ struct drm_bo_mem_reg *mem, int no_wait); ++extern int drm_bo_move_buffer(struct drm_buffer_object *bo, ++ uint64_t new_mem_flags, ++ int no_wait, int move_unfenced); ++extern int drm_bo_clean_mm(struct 
drm_device *dev, unsigned mem_type, int kern_clean); ++extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, ++ unsigned long p_offset, unsigned long p_size, ++ int kern_init); ++extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, ++ uint32_t handle, ++ int check_owner); ++extern int drm_bo_evict_cached(struct drm_buffer_object *bo); ++ ++extern void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo); ++extern void drm_bo_evict_mm(struct drm_device *dev, int mem_type, int no_wait); ++/* ++ * Buffer object memory move- and map helpers. ++ * drm_bo_move.c ++ */ ++ ++extern int drm_bo_add_ttm(struct drm_buffer_object *bo); ++extern int drm_bo_move_ttm(struct drm_buffer_object *bo, ++ int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_move_memcpy(struct drm_buffer_object *bo, ++ int evict, ++ int no_wait, struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_move_zero(struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, ++ int evict, int no_wait, ++ uint32_t fence_class, uint32_t fence_type, ++ uint32_t fence_flags, ++ struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_same_page(unsigned long offset, unsigned long offset2); ++extern unsigned long drm_bo_offset_end(unsigned long offset, ++ unsigned long end); ++ ++struct drm_bo_kmap_obj { ++ void *virtual; ++ struct page *page; ++ enum { ++ bo_map_iomap, ++ bo_map_vmap, ++ bo_map_kmap, ++ bo_map_premapped, ++ } bo_kmap_type; ++}; ++ ++static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem) ++{ ++ *is_iomem = (map->bo_kmap_type == bo_map_iomap || ++ map->bo_kmap_type == bo_map_premapped); ++ return map->virtual; ++} ++extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map); ++extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, ++ unsigned long num_pages, struct drm_bo_kmap_obj *map); 
++extern int drm_bo_pfn_prot(struct drm_buffer_object *bo, ++ unsigned long dst_offset, ++ unsigned long *pfn, ++ pgprot_t *prot); ++ ++ ++/* ++ * drm_regman.c ++ */ ++ ++struct drm_reg { ++ struct list_head head; ++ struct drm_fence_object *fence; ++ uint32_t fence_type; ++ uint32_t new_fence_type; ++}; ++ ++struct drm_reg_manager { ++ struct list_head free; ++ struct list_head lru; ++ struct list_head unfenced; ++ ++ int (*reg_reusable)(const struct drm_reg *reg, const void *data); ++ void (*reg_destroy)(struct drm_reg *reg); ++}; ++ ++extern int drm_regs_alloc(struct drm_reg_manager *manager, ++ const void *data, ++ uint32_t fence_class, ++ uint32_t fence_type, ++ int interruptible, ++ int no_wait, ++ struct drm_reg **reg); ++ ++extern void drm_regs_fence(struct drm_reg_manager *regs, ++ struct drm_fence_object *fence); ++ ++extern void drm_regs_free(struct drm_reg_manager *manager); ++extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg); ++extern void drm_regs_init(struct drm_reg_manager *manager, ++ int (*reg_reusable)(const struct drm_reg *, ++ const void *), ++ void (*reg_destroy)(struct drm_reg *)); ++ ++extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem, ++ void **virtual); ++extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem, ++ void *virtual); ++ ++/* ++ * drm_uncached.c ++ */ ++extern int drm_uncached_init(void); ++extern void drm_uncached_fini(void); ++extern struct page *drm_get_uncached_page(void); ++extern void drm_put_uncached_page(struct page *page); ++ ++#ifdef CONFIG_DEBUG_MUTEXES ++#define DRM_ASSERT_LOCKED(_mutex) \ ++ BUG_ON(!mutex_is_locked(_mutex) || \ ++ ((_mutex)->owner != current_thread_info())) ++#else ++#define DRM_ASSERT_LOCKED(_mutex) ++#endif ++#endif +diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h +index b3bcf72..181d9de 100644 +--- a/include/drm/i915_drm.h ++++ b/include/drm/i915_drm.h +@@ -187,7 +187,7 @@ typedef struct 
_drm_i915_sarea { + + #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) + #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) +-#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) ++#define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t) + #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) + #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) + #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) +@@ -220,6 +220,18 @@ typedef struct _drm_i915_sarea { + #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) + #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) + ++/* Asynchronous page flipping: ++ */ ++typedef struct drm_i915_flip { ++ /* ++ * This is really talking about planes, and we could rename it ++ * except for the fact that some of the duplicated i915_drm.h files ++ * out there check for HAVE_I915_FLIP and so might pick up this ++ * version. ++ */ ++ int pipes; ++} drm_i915_flip_t; ++ + /* Allow drivers to submit batchbuffers directly to hardware, relying + * on the security mechanisms provided by hardware. + */ +diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h +index 73ff51f..e4f5897 100644 +--- a/include/drm/radeon_drm.h ++++ b/include/drm/radeon_drm.h +@@ -453,6 +453,15 @@ typedef struct { + int pfCurrentPage; /* which buffer is being displayed? 
*/ + int crtc2_base; /* CRTC2 frame offset */ + int tiling_enabled; /* set by drm, read by 2d + 3d clients */ ++ ++ unsigned int last_fence; ++ ++ uint32_t front_handle; ++ uint32_t back_handle; ++ uint32_t depth_handle; ++ uint32_t front_pitch; ++ uint32_t back_pitch; ++ uint32_t depth_pitch; + } drm_radeon_sarea_t; + + /* WARNING: If you change any of these defines, make sure to change the +@@ -493,6 +502,18 @@ typedef struct { + #define DRM_RADEON_SURF_ALLOC 0x1a + #define DRM_RADEON_SURF_FREE 0x1b + ++#define DRM_RADEON_GEM_INFO 0x1c ++#define DRM_RADEON_GEM_CREATE 0x1d ++#define DRM_RADEON_GEM_MMAP 0x1e ++#define DRM_RADEON_GEM_PIN 0x1f ++#define DRM_RADEON_GEM_UNPIN 0x20 ++#define DRM_RADEON_GEM_PREAD 0x21 ++#define DRM_RADEON_GEM_PWRITE 0x22 ++#define DRM_RADEON_GEM_SET_DOMAIN 0x23 ++#define DRM_RADEON_GEM_WAIT_RENDERING 0x24 ++ ++#define DRM_RADEON_CS 0x26 ++ + #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) + #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) + #define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t) +@@ -521,6 +542,18 @@ typedef struct { + #define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t) + #define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t) + ++#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info) ++#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create) ++#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap) ++#define DRM_IOCTL_RADEON_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PIN, struct drm_radeon_gem_pin) ++#define DRM_IOCTL_RADEON_GEM_UNPIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_UNPIN, struct 
drm_radeon_gem_unpin) ++#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread) ++#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite) ++#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain) ++#define DRM_IOCTL_RADEON_GEM_WAIT_RENDERING DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_RENDERING, struct drm_radeon_gem_wait_rendering) ++#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) ++ ++ + typedef struct drm_radeon_init { + enum { + RADEON_INIT_CP = 0x01, +@@ -677,6 +710,7 @@ typedef struct drm_radeon_indirect { + #define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */ + #define RADEON_PARAM_FB_LOCATION 14 /* FB location */ + #define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */ ++#define RADEON_PARAM_KERNEL_MM 16 + + typedef struct drm_radeon_getparam { + int param; +@@ -731,6 +765,7 @@ typedef struct drm_radeon_setparam { + #define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */ + #define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */ + #define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */ ++#define RADEON_SETPARAM_MM_INIT 7 /* DDX wants memory manager but has no modesetting */ + /* 1.14: Clients can allocate/free a surface + */ + typedef struct drm_radeon_surface_alloc { +@@ -746,4 +781,106 @@ typedef struct drm_radeon_surface_free { + #define DRM_RADEON_VBLANK_CRTC1 1 + #define DRM_RADEON_VBLANK_CRTC2 2 + ++#define RADEON_GEM_DOMAIN_CPU 0x1 // Cached CPU domain ++#define RADEON_GEM_DOMAIN_GTT 0x2 // GTT or cache flushed ++#define RADEON_GEM_DOMAIN_VRAM 0x4 // VRAM domain ++ ++/* return to userspace start/size of gtt and vram apertures */ ++struct drm_radeon_gem_info { ++ uint64_t gart_start; ++ uint64_t gart_size; ++ uint64_t vram_start; ++ uint64_t vram_size; ++ uint64_t vram_visible; ++}; ++ ++struct 
drm_radeon_gem_create { ++ uint64_t size; ++ uint64_t alignment; ++ uint32_t handle; ++ uint32_t initial_domain; // to allow VRAM to be created ++ uint32_t no_backing_store; // for VRAM objects - select whether they need backing store ++ // pretty much front/back/depth don't need it - other things do ++}; ++ ++struct drm_radeon_gem_mmap { ++ uint32_t handle; ++ uint32_t pad; ++ uint64_t offset; ++ uint64_t size; ++ uint64_t addr_ptr; ++}; ++ ++struct drm_radeon_gem_set_domain { ++ uint32_t handle; ++ uint32_t read_domains; ++ uint32_t write_domain; ++}; ++ ++struct drm_radeon_gem_wait_rendering { ++ uint32_t handle; ++}; ++ ++struct drm_radeon_gem_pin { ++ uint32_t handle; ++ uint32_t pin_domain; ++ uint64_t alignment; ++ uint64_t offset; ++}; ++ ++struct drm_radeon_gem_unpin { ++ uint32_t handle; ++ uint32_t pad; ++}; ++ ++struct drm_radeon_gem_busy { ++ uint32_t handle; ++ uint32_t busy; ++}; ++ ++struct drm_radeon_gem_pread { ++ /** Handle for the object being read. */ ++ uint32_t handle; ++ uint32_t pad; ++ /** Offset into the object to read from */ ++ uint64_t offset; ++ /** Length of data to read */ ++ uint64_t size; ++ /** Pointer to write the data into. */ ++ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */ ++}; ++ ++struct drm_radeon_gem_pwrite { ++ /** Handle for the object being written to. */ ++ uint32_t handle; ++ uint32_t pad; ++ /** Offset into the object to write to */ ++ uint64_t offset; ++ /** Length of data to write */ ++ uint64_t size; ++ /** Pointer to read the data from. */ ++ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */ ++}; ++ ++ ++/* New interface which obsolete all previous interface. 
++ */ ++#define RADEON_CHUNK_ID_RELOCS 0x01 ++#define RADEON_CHUNK_ID_IB 0x02 ++#define RADEON_CHUNK_ID_OLD 0xff ++ ++struct drm_radeon_cs_chunk { ++ uint32_t chunk_id; ++ uint32_t length_dw; ++ uint64_t chunk_data; ++}; ++ ++struct drm_radeon_cs { ++ uint32_t num_chunks; ++ uint32_t cs_id; ++ uint64_t chunks; /* this points to uint64_t * which point to ++ cs chunks */ ++}; ++ ++ + #endif diff --git a/sys-kernel/geos_one-sources/files/drm-next.patch b/sys-kernel/geos_one-sources/files/drm-next.patch new file mode 100644 index 00000000..e1df9c0e --- /dev/null +++ b/sys-kernel/geos_one-sources/files/drm-next.patch @@ -0,0 +1,21344 @@ +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig +index a8b33c2..5130b72 100644 +--- a/drivers/gpu/drm/Kconfig ++++ b/drivers/gpu/drm/Kconfig +@@ -7,6 +7,8 @@ + menuconfig DRM + tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" + depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU ++ select I2C ++ select I2C_ALGOBIT + help + Kernel-level support for the Direct Rendering Infrastructure (DRI) + introduced in XFree86 4.0. If you say Y here, you need to select +@@ -65,6 +67,10 @@ config DRM_I830 + will load the correct one. + + config DRM_I915 ++ select FB_CFB_FILLRECT ++ select FB_CFB_COPYAREA ++ select FB_CFB_IMAGEBLIT ++ depends on FB + tristate "i915 driver" + help + Choose this option if you have a system that has Intel 830M, 845G, +@@ -76,6 +82,17 @@ config DRM_I915 + + endchoice + ++config DRM_I915_KMS ++ bool "Enable modesetting on intel by default" ++ depends on DRM_I915 ++ help ++ Choose this option if you want kernel modesetting enabled by default, ++ and you have a new enough userspace to support this. Running old ++ userspaces with this enabled will cause pain. Note that this causes ++ the driver to bind to PCI devices, which precludes loading things ++ like intelfb. 
++ ++ + config DRM_MGA + tristate "Matrox g200/g400" + depends on DRM +diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile +index 74da994..30022c4 100644 +--- a/drivers/gpu/drm/Makefile ++++ b/drivers/gpu/drm/Makefile +@@ -9,7 +9,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ + drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ + drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ + drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ +- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o ++ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ ++ drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o + + drm-$(CONFIG_COMPAT) += drm_ioc32.o + +diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c +index a734627..ca7a9ef 100644 +--- a/drivers/gpu/drm/drm_auth.c ++++ b/drivers/gpu/drm/drm_auth.c +@@ -45,14 +45,15 @@ + * the one with matching magic number, while holding the drm_device::struct_mutex + * lock. + */ +-static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) ++static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic) + { + struct drm_file *retval = NULL; + struct drm_magic_entry *pt; + struct drm_hash_item *hash; ++ struct drm_device *dev = master->minor->dev; + + mutex_lock(&dev->struct_mutex); +- if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { ++ if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); + retval = pt->priv; + } +@@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic + * associated the magic number hash key in drm_device::magiclist, while holding + * the drm_device::struct_mutex lock. 
+ */ +-static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, ++static int drm_add_magic(struct drm_master *master, struct drm_file *priv, + drm_magic_t magic) + { + struct drm_magic_entry *entry; +- ++ struct drm_device *dev = master->minor->dev; + DRM_DEBUG("%d\n", magic); + + entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); +@@ -83,11 +84,10 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, + return -ENOMEM; + memset(entry, 0, sizeof(*entry)); + entry->priv = priv; +- + entry->hash_item.key = (unsigned long)magic; + mutex_lock(&dev->struct_mutex); +- drm_ht_insert_item(&dev->magiclist, &entry->hash_item); +- list_add_tail(&entry->head, &dev->magicfree); ++ drm_ht_insert_item(&master->magiclist, &entry->hash_item); ++ list_add_tail(&entry->head, &master->magicfree); + mutex_unlock(&dev->struct_mutex); + + return 0; +@@ -102,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, + * Searches and unlinks the entry in drm_device::magiclist with the magic + * number hash key, while holding the drm_device::struct_mutex lock. 
+ */ +-static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) ++static int drm_remove_magic(struct drm_master *master, drm_magic_t magic) + { + struct drm_magic_entry *pt; + struct drm_hash_item *hash; ++ struct drm_device *dev = master->minor->dev; + + DRM_DEBUG("%d\n", magic); + + mutex_lock(&dev->struct_mutex); +- if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { ++ if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); +- drm_ht_remove_item(&dev->magiclist, hash); ++ drm_ht_remove_item(&master->magiclist, hash); + list_del(&pt->head); + mutex_unlock(&dev->struct_mutex); + +@@ -153,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) + ++sequence; /* reserve 0 */ + auth->magic = sequence++; + spin_unlock(&lock); +- } while (drm_find_file(dev, auth->magic)); ++ } while (drm_find_file(file_priv->master, auth->magic)); + file_priv->magic = auth->magic; +- drm_add_magic(dev, file_priv, auth->magic); ++ drm_add_magic(file_priv->master, file_priv, auth->magic); + } + + DRM_DEBUG("%u\n", auth->magic); +@@ -181,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data, + struct drm_file *file; + + DRM_DEBUG("%u\n", auth->magic); +- if ((file = drm_find_file(dev, auth->magic))) { ++ if ((file = drm_find_file(file_priv->master, auth->magic))) { + file->authenticated = 1; +- drm_remove_magic(dev, auth->magic); ++ drm_remove_magic(file_priv->master, auth->magic); + return 0; + } + return -EINVAL; +diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c +index bde64b8..72c667f 100644 +--- a/drivers/gpu/drm/drm_bufs.c ++++ b/drivers/gpu/drm/drm_bufs.c +@@ -54,9 +54,9 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, + { + struct drm_map_list *entry; + list_for_each_entry(entry, &dev->maplist, head) { +- if 
(entry->map && map->type == entry->map->type && ++ if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) && + ((entry->map->offset == map->offset) || +- (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) { ++ ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) { + return entry; + } + } +@@ -210,12 +210,12 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, + map->offset = (unsigned long)map->handle; + if (map->flags & _DRM_CONTAINS_LOCK) { + /* Prevent a 2nd X Server from creating a 2nd lock */ +- if (dev->lock.hw_lock != NULL) { ++ if (dev->primary->master->lock.hw_lock != NULL) { + vfree(map->handle); + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + return -EBUSY; + } +- dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */ ++ dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ + } + break; + case _DRM_AGP: { +@@ -262,6 +262,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, + DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size); + + break; ++ case _DRM_GEM: ++ DRM_ERROR("tried to rmmap GEM object\n"); ++ break; + } + case _DRM_SCATTER_GATHER: + if (!dev->sg) { +@@ -319,6 +322,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, + list->user_token = list->hash.key << PAGE_SHIFT; + mutex_unlock(&dev->struct_mutex); + ++ list->master = dev->primary->master; + *maplist = list; + return 0; + } +@@ -345,7 +349,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data, + struct drm_map_list *maplist; + int err; + +- if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP)) ++ if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) + return -EPERM; + + err = drm_addmap_core(dev, map->offset, map->size, map->type, +@@ -380,10 +384,12 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) + struct drm_map_list *r_list 
= NULL, *list_t; + drm_dma_handle_t dmah; + int found = 0; ++ struct drm_master *master; + + /* Find the list entry for the map and remove it */ + list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { + if (r_list->map == map) { ++ master = r_list->master; + list_del(&r_list->head); + drm_ht_remove_key(&dev->map_hash, + r_list->user_token >> PAGE_SHIFT); +@@ -409,6 +415,13 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) + break; + case _DRM_SHM: + vfree(map->handle); ++ if (master) { ++ if (dev->sigdata.lock == master->lock.hw_lock) ++ dev->sigdata.lock = NULL; ++ master->lock.hw_lock = NULL; /* SHM removed */ ++ master->lock.file_priv = NULL; ++ wake_up_interruptible(&master->lock.lock_queue); ++ } + break; + case _DRM_AGP: + case _DRM_SCATTER_GATHER: +@@ -419,11 +432,15 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) + dmah.size = map->size; + __drm_pci_free(dev, &dmah); + break; ++ case _DRM_GEM: ++ DRM_ERROR("tried to rmmap GEM object\n"); ++ break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + + return 0; + } ++EXPORT_SYMBOL(drm_rmmap_locked); + + int drm_rmmap(struct drm_device *dev, drm_local_map_t *map) + { +diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c +index d505f69..809ec0f 100644 +--- a/drivers/gpu/drm/drm_context.c ++++ b/drivers/gpu/drm/drm_context.c +@@ -256,12 +256,13 @@ static int drm_context_switch(struct drm_device * dev, int old, int new) + * hardware lock is held, clears the drm_device::context_flag and wakes up + * drm_device::context_wait. + */ +-static int drm_context_switch_complete(struct drm_device * dev, int new) ++static int drm_context_switch_complete(struct drm_device *dev, ++ struct drm_file *file_priv, int new) + { + dev->last_context = new; /* PRE/POST: This is the _only_ writer. 
*/ + dev->last_switch = jiffies; + +- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { ++ if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { + DRM_ERROR("Lock isn't held after context switch\n"); + } + +@@ -420,7 +421,7 @@ int drm_newctx(struct drm_device *dev, void *data, + struct drm_ctx *ctx = data; + + DRM_DEBUG("%d\n", ctx->handle); +- drm_context_switch_complete(dev, ctx->handle); ++ drm_context_switch_complete(dev, file_priv, ctx->handle); + + return 0; + } +@@ -442,9 +443,6 @@ int drm_rmctx(struct drm_device *dev, void *data, + struct drm_ctx *ctx = data; + + DRM_DEBUG("%d\n", ctx->handle); +- if (ctx->handle == DRM_KERNEL_CONTEXT + 1) { +- file_priv->remove_auth_on_close = 1; +- } + if (ctx->handle != DRM_KERNEL_CONTEXT) { + if (dev->driver->context_dtor) + dev->driver->context_dtor(dev, ctx->handle); +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +new file mode 100644 +index 0000000..53c8725 +--- /dev/null ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -0,0 +1,2446 @@ ++/* ++ * Copyright (c) 2006-2008 Intel Corporation ++ * Copyright (c) 2007 Dave Airlie ++ * Copyright (c) 2008 Red Hat Inc. ++ * ++ * DRM core CRTC related functions ++ * ++ * Permission to use, copy, modify, distribute, and sell this software and its ++ * documentation for any purpose is hereby granted without fee, provided that ++ * the above copyright notice appear in all copies and that both that copyright ++ * notice and this permission notice appear in supporting documentation, and ++ * that the name of the copyright holders not be used in advertising or ++ * publicity pertaining to distribution of the software without specific, ++ * written prior permission. The copyright holders make no representations ++ * about the suitability of this software for any purpose. It is provided "as ++ * is" without express or implied warranty. 
++ * ++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, ++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO ++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR ++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, ++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER ++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE ++ * OF THIS SOFTWARE. ++ * ++ * Authors: ++ * Keith Packard ++ * Eric Anholt ++ * Dave Airlie ++ * Jesse Barnes ++ */ ++#include ++#include "drm.h" ++#include "drmP.h" ++#include "drm_crtc.h" ++ ++struct drm_prop_enum_list { ++ int type; ++ char *name; ++}; ++ ++/* Avoid boilerplate. I'm tired of typing. */ ++#define DRM_ENUM_NAME_FN(fnname, list) \ ++ char *fnname(int val) \ ++ { \ ++ int i; \ ++ for (i = 0; i < ARRAY_SIZE(list); i++) { \ ++ if (list[i].type == val) \ ++ return list[i].name; \ ++ } \ ++ return "(unknown)"; \ ++ } ++ ++/* ++ * Global properties ++ */ ++static struct drm_prop_enum_list drm_dpms_enum_list[] = ++{ { DRM_MODE_DPMS_ON, "On" }, ++ { DRM_MODE_DPMS_STANDBY, "Standby" }, ++ { DRM_MODE_DPMS_SUSPEND, "Suspend" }, ++ { DRM_MODE_DPMS_OFF, "Off" } ++}; ++ ++DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) ++ ++/* ++ * Optional properties ++ */ ++static struct drm_prop_enum_list drm_scaling_mode_enum_list[] = ++{ ++ { DRM_MODE_SCALE_NON_GPU, "Non-GPU" }, ++ { DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" }, ++ { DRM_MODE_SCALE_NO_SCALE, "No scale" }, ++ { DRM_MODE_SCALE_ASPECT, "Aspect" }, ++}; ++ ++static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = ++{ ++ { DRM_MODE_DITHERING_OFF, "Off" }, ++ { DRM_MODE_DITHERING_ON, "On" }, ++}; ++ ++/* ++ * Non-global properties, but "required" for certain connectors. 
++ */ ++static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = ++{ ++ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ ++ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ ++ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ ++}; ++ ++DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) ++ ++static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = ++{ ++ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ ++ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ ++ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ ++}; ++ ++DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name, ++ drm_dvi_i_subconnector_enum_list) ++ ++static struct drm_prop_enum_list drm_tv_select_enum_list[] = ++{ ++ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ ++ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ ++ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ ++ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ ++}; ++ ++DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) ++ ++static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = ++{ ++ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ ++ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ ++ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ ++ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ ++}; ++ ++DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, ++ drm_tv_subconnector_enum_list) ++ ++struct drm_conn_prop_enum_list { ++ int type; ++ char *name; ++ int count; ++}; ++ ++/* ++ * Connector and encoder types. 
++ */ ++static struct drm_conn_prop_enum_list drm_connector_enum_list[] = ++{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 }, ++ { DRM_MODE_CONNECTOR_VGA, "VGA", 0 }, ++ { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 }, ++ { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 }, ++ { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 }, ++ { DRM_MODE_CONNECTOR_Composite, "Composite", 0 }, ++ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, ++ { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, ++ { DRM_MODE_CONNECTOR_Component, "Component", 0 }, ++ { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 }, ++ { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 }, ++ { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, ++ { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, ++}; ++ ++static struct drm_prop_enum_list drm_encoder_enum_list[] = ++{ { DRM_MODE_ENCODER_NONE, "None" }, ++ { DRM_MODE_ENCODER_DAC, "DAC" }, ++ { DRM_MODE_ENCODER_TMDS, "TMDS" }, ++ { DRM_MODE_ENCODER_LVDS, "LVDS" }, ++ { DRM_MODE_ENCODER_TVDAC, "TV" }, ++}; ++ ++char *drm_get_encoder_name(struct drm_encoder *encoder) ++{ ++ static char buf[32]; ++ ++ snprintf(buf, 32, "%s-%d", ++ drm_encoder_enum_list[encoder->encoder_type].name, ++ encoder->base.id); ++ return buf; ++} ++ ++char *drm_get_connector_name(struct drm_connector *connector) ++{ ++ static char buf[32]; ++ ++ snprintf(buf, 32, "%s-%d", ++ drm_connector_enum_list[connector->connector_type].name, ++ connector->connector_type_id); ++ return buf; ++} ++EXPORT_SYMBOL(drm_get_connector_name); ++ ++char *drm_get_connector_status_name(enum drm_connector_status status) ++{ ++ if (status == connector_status_connected) ++ return "connected"; ++ else if (status == connector_status_disconnected) ++ return "disconnected"; ++ else ++ return "unknown"; ++} ++ ++/** ++ * drm_mode_object_get - allocate a new identifier ++ * @dev: DRM device ++ * @ptr: object pointer, used to generate unique ID ++ * @type: object type ++ * ++ * LOCKING: ++ * Caller must hold DRM mode_config lock. 
++ * ++ * Create a unique identifier based on @ptr in @dev's identifier space. Used ++ * for tracking modes, CRTCs and connectors. ++ * ++ * RETURNS: ++ * New unique (relative to other objects in @dev) integer identifier for the ++ * object. ++ */ ++static int drm_mode_object_get(struct drm_device *dev, ++ struct drm_mode_object *obj, uint32_t obj_type) ++{ ++ int new_id = 0; ++ int ret; ++ ++ WARN(!mutex_is_locked(&dev->mode_config.mutex), ++ "%s called w/o mode_config lock\n", __FUNCTION__); ++again: ++ if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { ++ DRM_ERROR("Ran out memory getting a mode number\n"); ++ return -EINVAL; ++ } ++ ++ ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); ++ if (ret == -EAGAIN) ++ goto again; ++ ++ obj->id = new_id; ++ obj->type = obj_type; ++ return 0; ++} ++ ++/** ++ * drm_mode_object_put - free an identifer ++ * @dev: DRM device ++ * @id: ID to free ++ * ++ * LOCKING: ++ * Caller must hold DRM mode_config lock. ++ * ++ * Free @id from @dev's unique identifier pool. ++ */ ++static void drm_mode_object_put(struct drm_device *dev, ++ struct drm_mode_object *object) ++{ ++ idr_remove(&dev->mode_config.crtc_idr, object->id); ++} ++ ++void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) ++{ ++ struct drm_mode_object *obj; ++ ++ obj = idr_find(&dev->mode_config.crtc_idr, id); ++ if (!obj || (obj->type != type) || (obj->id != id)) ++ return NULL; ++ ++ return obj; ++} ++EXPORT_SYMBOL(drm_mode_object_find); ++ ++/** ++ * drm_crtc_from_fb - find the CRTC structure associated with an fb ++ * @dev: DRM device ++ * @fb: framebuffer in question ++ * ++ * LOCKING: ++ * Caller must hold mode_config lock. ++ * ++ * Find CRTC in the mode_config structure that matches @fb. ++ * ++ * RETURNS: ++ * Pointer to the CRTC or NULL if it wasn't found. 
++ */ ++struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev, ++ struct drm_framebuffer *fb) ++{ ++ struct drm_crtc *crtc; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ if (crtc->fb == fb) ++ return crtc; ++ } ++ return NULL; ++} ++ ++/** ++ * drm_framebuffer_init - initialize a framebuffer ++ * @dev: DRM device ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Allocates an ID for the framebuffer's parent mode object, sets its mode ++ * functions & device file and adds it to the master fd list. ++ * ++ * RETURNS: ++ * Zero on success, error code on falure. ++ */ ++int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, ++ const struct drm_framebuffer_funcs *funcs) ++{ ++ int ret; ++ ++ ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); ++ if (ret) { ++ return ret; ++ } ++ ++ fb->dev = dev; ++ fb->funcs = funcs; ++ dev->mode_config.num_fb++; ++ list_add(&fb->head, &dev->mode_config.fb_list); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_framebuffer_init); ++ ++/** ++ * drm_framebuffer_cleanup - remove a framebuffer object ++ * @fb: framebuffer to remove ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes ++ * it, setting it to NULL. ++ */ ++void drm_framebuffer_cleanup(struct drm_framebuffer *fb) ++{ ++ struct drm_device *dev = fb->dev; ++ struct drm_crtc *crtc; ++ ++ /* remove from any CRTC */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ if (crtc->fb == fb) ++ crtc->fb = NULL; ++ } ++ ++ drm_mode_object_put(dev, &fb->base); ++ list_del(&fb->head); ++ dev->mode_config.num_fb--; ++} ++EXPORT_SYMBOL(drm_framebuffer_cleanup); ++ ++/** ++ * drm_crtc_init - Initialise a new CRTC object ++ * @dev: DRM device ++ * @crtc: CRTC object to init ++ * @funcs: callbacks for the new CRTC ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. 
++ * ++ * Inits a new object created as base part of an driver crtc object. ++ */ ++void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, ++ const struct drm_crtc_funcs *funcs) ++{ ++ crtc->dev = dev; ++ crtc->funcs = funcs; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); ++ ++ list_add_tail(&crtc->head, &dev->mode_config.crtc_list); ++ dev->mode_config.num_crtc++; ++ mutex_unlock(&dev->mode_config.mutex); ++} ++EXPORT_SYMBOL(drm_crtc_init); ++ ++/** ++ * drm_crtc_cleanup - Cleans up the core crtc usage. ++ * @crtc: CRTC to cleanup ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Cleanup @crtc. Removes from drm modesetting space ++ * does NOT free object, caller does that. ++ */ ++void drm_crtc_cleanup(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ ++ if (crtc->gamma_store) { ++ kfree(crtc->gamma_store); ++ crtc->gamma_store = NULL; ++ } ++ ++ drm_mode_object_put(dev, &crtc->base); ++ list_del(&crtc->head); ++ dev->mode_config.num_crtc--; ++} ++EXPORT_SYMBOL(drm_crtc_cleanup); ++ ++/** ++ * drm_mode_probed_add - add a mode to a connector's probed mode list ++ * @connector: connector the new mode ++ * @mode: mode data ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Add @mode to @connector's mode list for later use. ++ */ ++void drm_mode_probed_add(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ list_add(&mode->head, &connector->probed_modes); ++} ++EXPORT_SYMBOL(drm_mode_probed_add); ++ ++/** ++ * drm_mode_remove - remove and free a mode ++ * @connector: connector list to modify ++ * @mode: mode to remove ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Remove @mode from @connector's mode list, then free it. 
++ */ ++void drm_mode_remove(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ list_del(&mode->head); ++ kfree(mode); ++} ++EXPORT_SYMBOL(drm_mode_remove); ++ ++/** ++ * drm_connector_init - Init a preallocated connector ++ * @dev: DRM device ++ * @connector: the connector to init ++ * @funcs: callbacks for this connector ++ * @name: user visible name of the connector ++ * ++ * LOCKING: ++ * Caller must hold @dev's mode_config lock. ++ * ++ * Initialises a preallocated connector. Connectors should be ++ * subclassed as part of driver connector objects. ++ */ ++void drm_connector_init(struct drm_device *dev, ++ struct drm_connector *connector, ++ const struct drm_connector_funcs *funcs, ++ int connector_type) ++{ ++ mutex_lock(&dev->mode_config.mutex); ++ ++ connector->dev = dev; ++ connector->funcs = funcs; ++ drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); ++ connector->connector_type = connector_type; ++ connector->connector_type_id = ++ ++drm_connector_enum_list[connector_type].count; /* TODO */ ++ INIT_LIST_HEAD(&connector->user_modes); ++ INIT_LIST_HEAD(&connector->probed_modes); ++ INIT_LIST_HEAD(&connector->modes); ++ connector->edid_blob_ptr = NULL; ++ ++ list_add_tail(&connector->head, &dev->mode_config.connector_list); ++ dev->mode_config.num_connector++; ++ ++ drm_connector_attach_property(connector, ++ dev->mode_config.edid_property, 0); ++ ++ drm_connector_attach_property(connector, ++ dev->mode_config.dpms_property, 0); ++ ++ mutex_unlock(&dev->mode_config.mutex); ++} ++EXPORT_SYMBOL(drm_connector_init); ++ ++/** ++ * drm_connector_cleanup - cleans up an initialised connector ++ * @connector: connector to cleanup ++ * ++ * LOCKING: ++ * Caller must hold @dev's mode_config lock. ++ * ++ * Cleans up the connector but doesn't free the object. 
++ */ ++void drm_connector_cleanup(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_display_mode *mode, *t; ++ ++ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) ++ drm_mode_remove(connector, mode); ++ ++ list_for_each_entry_safe(mode, t, &connector->modes, head) ++ drm_mode_remove(connector, mode); ++ ++ list_for_each_entry_safe(mode, t, &connector->user_modes, head) ++ drm_mode_remove(connector, mode); ++ ++ mutex_lock(&dev->mode_config.mutex); ++ drm_mode_object_put(dev, &connector->base); ++ list_del(&connector->head); ++ mutex_unlock(&dev->mode_config.mutex); ++} ++EXPORT_SYMBOL(drm_connector_cleanup); ++ ++void drm_encoder_init(struct drm_device *dev, ++ struct drm_encoder *encoder, ++ const struct drm_encoder_funcs *funcs, ++ int encoder_type) ++{ ++ mutex_lock(&dev->mode_config.mutex); ++ ++ encoder->dev = dev; ++ ++ drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); ++ encoder->encoder_type = encoder_type; ++ encoder->funcs = funcs; ++ ++ list_add_tail(&encoder->head, &dev->mode_config.encoder_list); ++ dev->mode_config.num_encoder++; ++ ++ mutex_unlock(&dev->mode_config.mutex); ++} ++EXPORT_SYMBOL(drm_encoder_init); ++ ++void drm_encoder_cleanup(struct drm_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->dev; ++ mutex_lock(&dev->mode_config.mutex); ++ drm_mode_object_put(dev, &encoder->base); ++ list_del(&encoder->head); ++ mutex_unlock(&dev->mode_config.mutex); ++} ++EXPORT_SYMBOL(drm_encoder_cleanup); ++ ++/** ++ * drm_mode_create - create a new display mode ++ * @dev: DRM device ++ * ++ * LOCKING: ++ * Caller must hold DRM mode_config lock. ++ * ++ * Create a new drm_display_mode, give it an ID, and return it. ++ * ++ * RETURNS: ++ * Pointer to new mode on success, NULL on error. 
++ */ ++struct drm_display_mode *drm_mode_create(struct drm_device *dev) ++{ ++ struct drm_display_mode *nmode; ++ ++ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL); ++ if (!nmode) ++ return NULL; ++ ++ drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE); ++ return nmode; ++} ++EXPORT_SYMBOL(drm_mode_create); ++ ++/** ++ * drm_mode_destroy - remove a mode ++ * @dev: DRM device ++ * @mode: mode to remove ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Free @mode's unique identifier, then free it. ++ */ ++void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) ++{ ++ drm_mode_object_put(dev, &mode->base); ++ ++ kfree(mode); ++} ++EXPORT_SYMBOL(drm_mode_destroy); ++ ++static int drm_mode_create_standard_connector_properties(struct drm_device *dev) ++{ ++ struct drm_property *edid; ++ struct drm_property *dpms; ++ int i; ++ ++ /* ++ * Standard properties (apply to all connectors) ++ */ ++ edid = drm_property_create(dev, DRM_MODE_PROP_BLOB | ++ DRM_MODE_PROP_IMMUTABLE, ++ "EDID", 0); ++ dev->mode_config.edid_property = edid; ++ ++ dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM, ++ "DPMS", ARRAY_SIZE(drm_dpms_enum_list)); ++ for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++) ++ drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type, ++ drm_dpms_enum_list[i].name); ++ dev->mode_config.dpms_property = dpms; ++ ++ return 0; ++} ++ ++/** ++ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties ++ * @dev: DRM device ++ * ++ * Called by a driver the first time a DVI-I connector is made. 
++ */ ++int drm_mode_create_dvi_i_properties(struct drm_device *dev) ++{ ++ struct drm_property *dvi_i_selector; ++ struct drm_property *dvi_i_subconnector; ++ int i; ++ ++ if (dev->mode_config.dvi_i_select_subconnector_property) ++ return 0; ++ ++ dvi_i_selector = ++ drm_property_create(dev, DRM_MODE_PROP_ENUM, ++ "select subconnector", ++ ARRAY_SIZE(drm_dvi_i_select_enum_list)); ++ for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++) ++ drm_property_add_enum(dvi_i_selector, i, ++ drm_dvi_i_select_enum_list[i].type, ++ drm_dvi_i_select_enum_list[i].name); ++ dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; ++ ++ dvi_i_subconnector = ++ drm_property_create(dev, DRM_MODE_PROP_ENUM | ++ DRM_MODE_PROP_IMMUTABLE, ++ "subconnector", ++ ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); ++ for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++) ++ drm_property_add_enum(dvi_i_subconnector, i, ++ drm_dvi_i_subconnector_enum_list[i].type, ++ drm_dvi_i_subconnector_enum_list[i].name); ++ dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_mode_create_dvi_i_properties); ++ ++/** ++ * drm_create_tv_properties - create TV specific connector properties ++ * @dev: DRM device ++ * @num_modes: number of different TV formats (modes) supported ++ * @modes: array of pointers to strings containing name of each format ++ * ++ * Called by a driver's TV initialization routine, this function creates ++ * the TV specific connector properties for a given device. Caller is ++ * responsible for allocating a list of format names and passing them to ++ * this routine. 
++ */ ++int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, ++ char *modes[]) ++{ ++ struct drm_property *tv_selector; ++ struct drm_property *tv_subconnector; ++ int i; ++ ++ if (dev->mode_config.tv_select_subconnector_property) ++ return 0; ++ ++ /* ++ * Basic connector properties ++ */ ++ tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM, ++ "select subconnector", ++ ARRAY_SIZE(drm_tv_select_enum_list)); ++ for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++) ++ drm_property_add_enum(tv_selector, i, ++ drm_tv_select_enum_list[i].type, ++ drm_tv_select_enum_list[i].name); ++ dev->mode_config.tv_select_subconnector_property = tv_selector; ++ ++ tv_subconnector = ++ drm_property_create(dev, DRM_MODE_PROP_ENUM | ++ DRM_MODE_PROP_IMMUTABLE, "subconnector", ++ ARRAY_SIZE(drm_tv_subconnector_enum_list)); ++ for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++) ++ drm_property_add_enum(tv_subconnector, i, ++ drm_tv_subconnector_enum_list[i].type, ++ drm_tv_subconnector_enum_list[i].name); ++ dev->mode_config.tv_subconnector_property = tv_subconnector; ++ ++ /* ++ * Other, TV specific properties: margins & TV modes. 
++ */ ++ dev->mode_config.tv_left_margin_property = ++ drm_property_create(dev, DRM_MODE_PROP_RANGE, ++ "left margin", 2); ++ dev->mode_config.tv_left_margin_property->values[0] = 0; ++ dev->mode_config.tv_left_margin_property->values[1] = 100; ++ ++ dev->mode_config.tv_right_margin_property = ++ drm_property_create(dev, DRM_MODE_PROP_RANGE, ++ "right margin", 2); ++ dev->mode_config.tv_right_margin_property->values[0] = 0; ++ dev->mode_config.tv_right_margin_property->values[1] = 100; ++ ++ dev->mode_config.tv_top_margin_property = ++ drm_property_create(dev, DRM_MODE_PROP_RANGE, ++ "top margin", 2); ++ dev->mode_config.tv_top_margin_property->values[0] = 0; ++ dev->mode_config.tv_top_margin_property->values[1] = 100; ++ ++ dev->mode_config.tv_bottom_margin_property = ++ drm_property_create(dev, DRM_MODE_PROP_RANGE, ++ "bottom margin", 2); ++ dev->mode_config.tv_bottom_margin_property->values[0] = 0; ++ dev->mode_config.tv_bottom_margin_property->values[1] = 100; ++ ++ dev->mode_config.tv_mode_property = ++ drm_property_create(dev, DRM_MODE_PROP_ENUM, ++ "mode", num_modes); ++ for (i = 0; i < num_modes; i++) ++ drm_property_add_enum(dev->mode_config.tv_mode_property, i, ++ i, modes[i]); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_mode_create_tv_properties); ++ ++/** ++ * drm_mode_create_scaling_mode_property - create scaling mode property ++ * @dev: DRM device ++ * ++ * Called by a driver the first time it's needed, must be attached to desired ++ * connectors. 
++ */ ++int drm_mode_create_scaling_mode_property(struct drm_device *dev) ++{ ++ struct drm_property *scaling_mode; ++ int i; ++ ++ if (dev->mode_config.scaling_mode_property) ++ return 0; ++ ++ scaling_mode = ++ drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode", ++ ARRAY_SIZE(drm_scaling_mode_enum_list)); ++ for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++) ++ drm_property_add_enum(scaling_mode, i, ++ drm_scaling_mode_enum_list[i].type, ++ drm_scaling_mode_enum_list[i].name); ++ ++ dev->mode_config.scaling_mode_property = scaling_mode; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); ++ ++/** ++ * drm_mode_create_dithering_property - create dithering property ++ * @dev: DRM device ++ * ++ * Called by a driver the first time it's needed, must be attached to desired ++ * connectors. ++ */ ++int drm_mode_create_dithering_property(struct drm_device *dev) ++{ ++ struct drm_property *dithering_mode; ++ int i; ++ ++ if (dev->mode_config.dithering_mode_property) ++ return 0; ++ ++ dithering_mode = ++ drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering", ++ ARRAY_SIZE(drm_dithering_mode_enum_list)); ++ for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++) ++ drm_property_add_enum(dithering_mode, i, ++ drm_dithering_mode_enum_list[i].type, ++ drm_dithering_mode_enum_list[i].name); ++ dev->mode_config.dithering_mode_property = dithering_mode; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_mode_create_dithering_property); ++ ++/** ++ * drm_mode_config_init - initialize DRM mode_configuration structure ++ * @dev: DRM device ++ * ++ * LOCKING: ++ * None, should happen single threaded at init time. ++ * ++ * Initialize @dev's mode_config structure, used for tracking the graphics ++ * configuration of @dev. 
++ */ ++void drm_mode_config_init(struct drm_device *dev) ++{ ++ mutex_init(&dev->mode_config.mutex); ++ INIT_LIST_HEAD(&dev->mode_config.fb_list); ++ INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); ++ INIT_LIST_HEAD(&dev->mode_config.crtc_list); ++ INIT_LIST_HEAD(&dev->mode_config.connector_list); ++ INIT_LIST_HEAD(&dev->mode_config.encoder_list); ++ INIT_LIST_HEAD(&dev->mode_config.property_list); ++ INIT_LIST_HEAD(&dev->mode_config.property_blob_list); ++ idr_init(&dev->mode_config.crtc_idr); ++ ++ mutex_lock(&dev->mode_config.mutex); ++ drm_mode_create_standard_connector_properties(dev); ++ mutex_unlock(&dev->mode_config.mutex); ++ ++ /* Just to be sure */ ++ dev->mode_config.num_fb = 0; ++ dev->mode_config.num_connector = 0; ++ dev->mode_config.num_crtc = 0; ++ dev->mode_config.num_encoder = 0; ++} ++EXPORT_SYMBOL(drm_mode_config_init); ++ ++int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group) ++{ ++ uint32_t total_objects = 0; ++ ++ total_objects += dev->mode_config.num_crtc; ++ total_objects += dev->mode_config.num_connector; ++ total_objects += dev->mode_config.num_encoder; ++ ++ if (total_objects == 0) ++ return -EINVAL; ++ ++ group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL); ++ if (!group->id_list) ++ return -ENOMEM; ++ ++ group->num_crtcs = 0; ++ group->num_connectors = 0; ++ group->num_encoders = 0; ++ return 0; ++} ++ ++int drm_mode_group_init_legacy_group(struct drm_device *dev, ++ struct drm_mode_group *group) ++{ ++ struct drm_crtc *crtc; ++ struct drm_encoder *encoder; ++ struct drm_connector *connector; ++ int ret; ++ ++ if ((ret = drm_mode_group_init(dev, group))) ++ return ret; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) ++ group->id_list[group->num_crtcs++] = crtc->base.id; ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) ++ group->id_list[group->num_crtcs + group->num_encoders++] = ++ encoder->base.id; ++ ++ list_for_each_entry(connector, 
&dev->mode_config.connector_list, head) ++ group->id_list[group->num_crtcs + group->num_encoders + ++ group->num_connectors++] = connector->base.id; ++ ++ return 0; ++} ++ ++/** ++ * drm_mode_config_cleanup - free up DRM mode_config info ++ * @dev: DRM device ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Free up all the connectors and CRTCs associated with this DRM device, then ++ * free up the framebuffers and associated buffer objects. ++ * ++ * FIXME: cleanup any dangling user buffer objects too ++ */ ++void drm_mode_config_cleanup(struct drm_device *dev) ++{ ++ struct drm_connector *connector, *ot; ++ struct drm_crtc *crtc, *ct; ++ struct drm_encoder *encoder, *enct; ++ struct drm_framebuffer *fb, *fbt; ++ struct drm_property *property, *pt; ++ ++ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list, ++ head) { ++ encoder->funcs->destroy(encoder); ++ } ++ ++ list_for_each_entry_safe(connector, ot, ++ &dev->mode_config.connector_list, head) { ++ connector->funcs->destroy(connector); ++ } ++ ++ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, ++ head) { ++ drm_property_destroy(dev, property); ++ } ++ ++ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { ++ fb->funcs->destroy(fb); ++ } ++ ++ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { ++ crtc->funcs->destroy(crtc); ++ } ++ ++} ++EXPORT_SYMBOL(drm_mode_config_cleanup); ++ ++/** ++ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo ++ * @out: drm_mode_modeinfo struct to return to the user ++ * @in: drm_display_mode to use ++ * ++ * LOCKING: ++ * None. ++ * ++ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to ++ * the user. 
++ */ ++void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, ++ struct drm_display_mode *in) ++{ ++ out->clock = in->clock; ++ out->hdisplay = in->hdisplay; ++ out->hsync_start = in->hsync_start; ++ out->hsync_end = in->hsync_end; ++ out->htotal = in->htotal; ++ out->hskew = in->hskew; ++ out->vdisplay = in->vdisplay; ++ out->vsync_start = in->vsync_start; ++ out->vsync_end = in->vsync_end; ++ out->vtotal = in->vtotal; ++ out->vscan = in->vscan; ++ out->vrefresh = in->vrefresh; ++ out->flags = in->flags; ++ out->type = in->type; ++ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); ++ out->name[DRM_DISPLAY_MODE_LEN-1] = 0; ++} ++ ++/** ++ * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode ++ * @out: drm_display_mode to return to the user ++ * @in: drm_mode_modeinfo to use ++ * ++ * LOCKING: ++ * None. ++ * ++ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to ++ * the caller. ++ */ ++void drm_crtc_convert_umode(struct drm_display_mode *out, ++ struct drm_mode_modeinfo *in) ++{ ++ out->clock = in->clock; ++ out->hdisplay = in->hdisplay; ++ out->hsync_start = in->hsync_start; ++ out->hsync_end = in->hsync_end; ++ out->htotal = in->htotal; ++ out->hskew = in->hskew; ++ out->vdisplay = in->vdisplay; ++ out->vsync_start = in->vsync_start; ++ out->vsync_end = in->vsync_end; ++ out->vtotal = in->vtotal; ++ out->vscan = in->vscan; ++ out->vrefresh = in->vrefresh; ++ out->flags = in->flags; ++ out->type = in->type; ++ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); ++ out->name[DRM_DISPLAY_MODE_LEN-1] = 0; ++} ++ ++/** ++ * drm_mode_getresources - get graphics configuration ++ * @inode: inode from the ioctl ++ * @filp: file * from the ioctl ++ * @cmd: cmd from ioctl ++ * @arg: arg from ioctl ++ * ++ * LOCKING: ++ * Takes mode config lock. ++ * ++ * Construct a set of configuration description structures and return ++ * them to the user, including CRTC, connector and framebuffer configuration. 
++ * ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. ++ */ ++int drm_mode_getresources(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_mode_card_res *card_res = data; ++ struct list_head *lh; ++ struct drm_framebuffer *fb; ++ struct drm_connector *connector; ++ struct drm_crtc *crtc; ++ struct drm_encoder *encoder; ++ int ret = 0; ++ int connector_count = 0; ++ int crtc_count = 0; ++ int fb_count = 0; ++ int encoder_count = 0; ++ int copied = 0, i; ++ uint32_t __user *fb_id; ++ uint32_t __user *crtc_id; ++ uint32_t __user *connector_id; ++ uint32_t __user *encoder_id; ++ struct drm_mode_group *mode_group; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ ++ /* ++ * For the non-control nodes we need to limit the list of resources ++ * by IDs in the group list for this node ++ */ ++ list_for_each(lh, &file_priv->fbs) ++ fb_count++; ++ ++ mode_group = &file_priv->master->minor->mode_group; ++ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { ++ ++ list_for_each(lh, &dev->mode_config.crtc_list) ++ crtc_count++; ++ ++ list_for_each(lh, &dev->mode_config.connector_list) ++ connector_count++; ++ ++ list_for_each(lh, &dev->mode_config.encoder_list) ++ encoder_count++; ++ } else { ++ ++ crtc_count = mode_group->num_crtcs; ++ connector_count = mode_group->num_connectors; ++ encoder_count = mode_group->num_encoders; ++ } ++ ++ card_res->max_height = dev->mode_config.max_height; ++ card_res->min_height = dev->mode_config.min_height; ++ card_res->max_width = dev->mode_config.max_width; ++ card_res->min_width = dev->mode_config.min_width; ++ ++ /* handle this in 4 parts */ ++ /* FBs */ ++ if (card_res->count_fbs >= fb_count) { ++ copied = 0; ++ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; ++ list_for_each_entry(fb, &file_priv->fbs, head) { ++ if (put_user(fb->base.id, fb_id + copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ } ++ card_res->count_fbs = 
fb_count; ++ ++ /* CRTCs */ ++ if (card_res->count_crtcs >= crtc_count) { ++ copied = 0; ++ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; ++ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, ++ head) { ++ DRM_DEBUG("CRTC ID is %d\n", crtc->base.id); ++ if (put_user(crtc->base.id, crtc_id + copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ } else { ++ for (i = 0; i < mode_group->num_crtcs; i++) { ++ if (put_user(mode_group->id_list[i], ++ crtc_id + copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ } ++ } ++ card_res->count_crtcs = crtc_count; ++ ++ /* Encoders */ ++ if (card_res->count_encoders >= encoder_count) { ++ copied = 0; ++ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; ++ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { ++ list_for_each_entry(encoder, ++ &dev->mode_config.encoder_list, ++ head) { ++ DRM_DEBUG("ENCODER ID is %d\n", ++ encoder->base.id); ++ if (put_user(encoder->base.id, encoder_id + ++ copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ } else { ++ for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) { ++ if (put_user(mode_group->id_list[i], ++ encoder_id + copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ ++ } ++ } ++ card_res->count_encoders = encoder_count; ++ ++ /* Connectors */ ++ if (card_res->count_connectors >= connector_count) { ++ copied = 0; ++ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; ++ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { ++ list_for_each_entry(connector, ++ &dev->mode_config.connector_list, ++ head) { ++ DRM_DEBUG("CONNECTOR ID is %d\n", ++ connector->base.id); ++ if (put_user(connector->base.id, ++ connector_id + copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ } else { ++ int start = mode_group->num_crtcs + ++ 
mode_group->num_encoders; ++ for (i = start; i < start + mode_group->num_connectors; i++) { ++ if (put_user(mode_group->id_list[i], ++ connector_id + copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ } ++ } ++ card_res->count_connectors = connector_count; ++ ++ DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs, ++ card_res->count_connectors, card_res->count_encoders); ++ ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++/** ++ * drm_mode_getcrtc - get CRTC configuration ++ * @inode: inode from the ioctl ++ * @filp: file * from the ioctl ++ * @cmd: cmd from ioctl ++ * @arg: arg from ioctl ++ * ++ * LOCKING: ++ * Caller? (FIXME) ++ * ++ * Construct a CRTC configuration structure to return to the user. ++ * ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. ++ */ ++int drm_mode_getcrtc(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_crtc *crtc_resp = data; ++ struct drm_crtc *crtc; ++ struct drm_mode_object *obj; ++ int ret = 0; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ ++ obj = drm_mode_object_find(dev, crtc_resp->crtc_id, ++ DRM_MODE_OBJECT_CRTC); ++ if (!obj) { ++ ret = -EINVAL; ++ goto out; ++ } ++ crtc = obj_to_crtc(obj); ++ ++ crtc_resp->x = crtc->x; ++ crtc_resp->y = crtc->y; ++ crtc_resp->gamma_size = crtc->gamma_size; ++ if (crtc->fb) ++ crtc_resp->fb_id = crtc->fb->base.id; ++ else ++ crtc_resp->fb_id = 0; ++ ++ if (crtc->enabled) { ++ ++ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); ++ crtc_resp->mode_valid = 1; ++ ++ } else { ++ crtc_resp->mode_valid = 0; ++ } ++ ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++/** ++ * drm_mode_getconnector - get connector configuration ++ * @inode: inode from the ioctl ++ * @filp: file * from the ioctl ++ * @cmd: cmd from ioctl ++ * @arg: arg from ioctl ++ * ++ * LOCKING: ++ * Caller? 
(FIXME) ++ * ++ * Construct a connector configuration structure to return to the user. ++ * ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. ++ */ ++int drm_mode_getconnector(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_mode_get_connector *out_resp = data; ++ struct drm_mode_object *obj; ++ struct drm_connector *connector; ++ struct drm_display_mode *mode; ++ int mode_count = 0; ++ int props_count = 0; ++ int encoders_count = 0; ++ int ret = 0; ++ int copied = 0; ++ int i; ++ struct drm_mode_modeinfo u_mode; ++ struct drm_mode_modeinfo __user *mode_ptr; ++ uint32_t __user *prop_ptr; ++ uint64_t __user *prop_values; ++ uint32_t __user *encoder_ptr; ++ ++ memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); ++ ++ DRM_DEBUG("connector id %d:\n", out_resp->connector_id); ++ ++ mutex_lock(&dev->mode_config.mutex); ++ ++ obj = drm_mode_object_find(dev, out_resp->connector_id, ++ DRM_MODE_OBJECT_CONNECTOR); ++ if (!obj) { ++ ret = -EINVAL; ++ goto out; ++ } ++ connector = obj_to_connector(obj); ++ ++ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { ++ if (connector->property_ids[i] != 0) { ++ props_count++; ++ } ++ } ++ ++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { ++ if (connector->encoder_ids[i] != 0) { ++ encoders_count++; ++ } ++ } ++ ++ if (out_resp->count_modes == 0) { ++ connector->funcs->fill_modes(connector, ++ dev->mode_config.max_width, ++ dev->mode_config.max_height); ++ } ++ ++ /* delayed so we get modes regardless of pre-fill_modes state */ ++ list_for_each_entry(mode, &connector->modes, head) ++ mode_count++; ++ ++ out_resp->connector_id = connector->base.id; ++ out_resp->connector_type = connector->connector_type; ++ out_resp->connector_type_id = connector->connector_type_id; ++ out_resp->mm_width = connector->display_info.width_mm; ++ out_resp->mm_height = connector->display_info.height_mm; ++ out_resp->subpixel = connector->display_info.subpixel_order; ++ 
out_resp->connection = connector->status; ++ if (connector->encoder) ++ out_resp->encoder_id = connector->encoder->base.id; ++ else ++ out_resp->encoder_id = 0; ++ ++ /* ++ * This ioctl is called twice, once to determine how much space is ++ * needed, and the 2nd time to fill it. ++ */ ++ if ((out_resp->count_modes >= mode_count) && mode_count) { ++ copied = 0; ++ mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr; ++ list_for_each_entry(mode, &connector->modes, head) { ++ drm_crtc_convert_to_umode(&u_mode, mode); ++ if (copy_to_user(mode_ptr + copied, ++ &u_mode, sizeof(u_mode))) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ } ++ out_resp->count_modes = mode_count; ++ ++ if ((out_resp->count_props >= props_count) && props_count) { ++ copied = 0; ++ prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr); ++ prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr); ++ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { ++ if (connector->property_ids[i] != 0) { ++ if (put_user(connector->property_ids[i], ++ prop_ptr + copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ if (put_user(connector->property_values[i], ++ prop_values + copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ } ++ } ++ out_resp->count_props = props_count; ++ ++ if ((out_resp->count_encoders >= encoders_count) && encoders_count) { ++ copied = 0; ++ encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr); ++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { ++ if (connector->encoder_ids[i] != 0) { ++ if (put_user(connector->encoder_ids[i], ++ encoder_ptr + copied)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ copied++; ++ } ++ } ++ } ++ out_resp->count_encoders = encoders_count; ++ ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++int drm_mode_getencoder(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_mode_get_encoder *enc_resp = data; ++ struct drm_mode_object 
*obj; ++ struct drm_encoder *encoder; ++ int ret = 0; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ obj = drm_mode_object_find(dev, enc_resp->encoder_id, ++ DRM_MODE_OBJECT_ENCODER); ++ if (!obj) { ++ ret = -EINVAL; ++ goto out; ++ } ++ encoder = obj_to_encoder(obj); ++ ++ if (encoder->crtc) ++ enc_resp->crtc_id = encoder->crtc->base.id; ++ else ++ enc_resp->crtc_id = 0; ++ enc_resp->encoder_type = encoder->encoder_type; ++ enc_resp->encoder_id = encoder->base.id; ++ enc_resp->possible_crtcs = encoder->possible_crtcs; ++ enc_resp->possible_clones = encoder->possible_clones; ++ ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++/** ++ * drm_mode_setcrtc - set CRTC configuration ++ * @inode: inode from the ioctl ++ * @filp: file * from the ioctl ++ * @cmd: cmd from ioctl ++ * @arg: arg from ioctl ++ * ++ * LOCKING: ++ * Caller? (FIXME) ++ * ++ * Build a new CRTC configuration based on user request. ++ * ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. ++ */ ++int drm_mode_setcrtc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_mode_config *config = &dev->mode_config; ++ struct drm_mode_crtc *crtc_req = data; ++ struct drm_mode_object *obj; ++ struct drm_crtc *crtc, *crtcfb; ++ struct drm_connector **connector_set = NULL, *connector; ++ struct drm_framebuffer *fb = NULL; ++ struct drm_display_mode *mode = NULL; ++ struct drm_mode_set set; ++ uint32_t __user *set_connectors_ptr; ++ int ret = 0; ++ int i; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ obj = drm_mode_object_find(dev, crtc_req->crtc_id, ++ DRM_MODE_OBJECT_CRTC); ++ if (!obj) { ++ DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id); ++ ret = -EINVAL; ++ goto out; ++ } ++ crtc = obj_to_crtc(obj); ++ ++ if (crtc_req->mode_valid) { ++ /* If we have a mode we need a framebuffer. 
*/ ++ /* If we pass -1, set the mode with the currently bound fb */ ++ if (crtc_req->fb_id == -1) { ++ list_for_each_entry(crtcfb, ++ &dev->mode_config.crtc_list, head) { ++ if (crtcfb == crtc) { ++ DRM_DEBUG("Using current fb for setmode\n"); ++ fb = crtc->fb; ++ } ++ } ++ } else { ++ obj = drm_mode_object_find(dev, crtc_req->fb_id, ++ DRM_MODE_OBJECT_FB); ++ if (!obj) { ++ DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id); ++ ret = -EINVAL; ++ goto out; ++ } ++ fb = obj_to_fb(obj); ++ } ++ ++ mode = drm_mode_create(dev); ++ drm_crtc_convert_umode(mode, &crtc_req->mode); ++ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); ++ } ++ ++ if (crtc_req->count_connectors == 0 && mode) { ++ DRM_DEBUG("Count connectors is 0 but mode set\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (crtc_req->count_connectors > 0 && !mode && !fb) { ++ DRM_DEBUG("Count connectors is %d but no mode or fb set\n", ++ crtc_req->count_connectors); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (crtc_req->count_connectors > 0) { ++ u32 out_id; ++ ++ /* Avoid unbounded kernel memory allocation */ ++ if (crtc_req->count_connectors > config->num_connector) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ connector_set = kmalloc(crtc_req->count_connectors * ++ sizeof(struct drm_connector *), ++ GFP_KERNEL); ++ if (!connector_set) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ for (i = 0; i < crtc_req->count_connectors; i++) { ++ set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr; ++ if (get_user(out_id, &set_connectors_ptr[i])) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ obj = drm_mode_object_find(dev, out_id, ++ DRM_MODE_OBJECT_CONNECTOR); ++ if (!obj) { ++ DRM_DEBUG("Connector id %d unknown\n", out_id); ++ ret = -EINVAL; ++ goto out; ++ } ++ connector = obj_to_connector(obj); ++ ++ connector_set[i] = connector; ++ } ++ } ++ ++ set.crtc = crtc; ++ set.x = crtc_req->x; ++ set.y = crtc_req->y; ++ set.mode = mode; ++ set.connectors = connector_set; ++ set.num_connectors = 
crtc_req->count_connectors; ++ set.fb =fb; ++ ret = crtc->funcs->set_config(&set); ++ ++out: ++ kfree(connector_set); ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++int drm_mode_cursor_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_cursor *req = data; ++ struct drm_mode_object *obj; ++ struct drm_crtc *crtc; ++ int ret = 0; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!req->flags) { ++ DRM_ERROR("no operation set\n"); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->mode_config.mutex); ++ obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); ++ if (!obj) { ++ DRM_DEBUG("Unknown CRTC ID %d\n", req->crtc_id); ++ ret = -EINVAL; ++ goto out; ++ } ++ crtc = obj_to_crtc(obj); ++ ++ if (req->flags & DRM_MODE_CURSOR_BO) { ++ if (!crtc->funcs->cursor_set) { ++ DRM_ERROR("crtc does not support cursor\n"); ++ ret = -ENXIO; ++ goto out; ++ } ++ /* Turns off the cursor if handle is 0 */ ++ ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle, ++ req->width, req->height); ++ } ++ ++ if (req->flags & DRM_MODE_CURSOR_MOVE) { ++ if (crtc->funcs->cursor_move) { ++ ret = crtc->funcs->cursor_move(crtc, req->x, req->y); ++ } else { ++ DRM_ERROR("crtc does not support cursor\n"); ++ ret = -EFAULT; ++ goto out; ++ } ++ } ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++/** ++ * drm_mode_addfb - add an FB to the graphics configuration ++ * @inode: inode from the ioctl ++ * @filp: file * from the ioctl ++ * @cmd: cmd from ioctl ++ * @arg: arg from ioctl ++ * ++ * LOCKING: ++ * Takes mode config lock. ++ * ++ * Add a new FB to the specified CRTC, given a user request. ++ * ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. 
++ */ ++int drm_mode_addfb(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_fb_cmd *r = data; ++ struct drm_mode_config *config = &dev->mode_config; ++ struct drm_framebuffer *fb; ++ int ret = 0; ++ ++ if ((config->min_width > r->width) || (r->width > config->max_width)) { ++ DRM_ERROR("mode new framebuffer width not within limits\n"); ++ return -EINVAL; ++ } ++ if ((config->min_height > r->height) || (r->height > config->max_height)) { ++ DRM_ERROR("mode new framebuffer height not within limits\n"); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->mode_config.mutex); ++ ++ /* TODO check buffer is sufficently large */ ++ /* TODO setup destructor callback */ ++ ++ fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); ++ if (!fb) { ++ DRM_ERROR("could not create framebuffer\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ r->fb_id = fb->base.id; ++ list_add(&fb->filp_head, &file_priv->fbs); ++ ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++/** ++ * drm_mode_rmfb - remove an FB from the configuration ++ * @inode: inode from the ioctl ++ * @filp: file * from the ioctl ++ * @cmd: cmd from ioctl ++ * @arg: arg from ioctl ++ * ++ * LOCKING: ++ * Takes mode config lock. ++ * ++ * Remove the FB specified by the user. ++ * ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. ++ */ ++int drm_mode_rmfb(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_object *obj; ++ struct drm_framebuffer *fb = NULL; ++ struct drm_framebuffer *fbl = NULL; ++ uint32_t *id = data; ++ int ret = 0; ++ int found = 0; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); ++ /* TODO check that we realy get a framebuffer back. 
*/ ++ if (!obj) { ++ DRM_ERROR("mode invalid framebuffer id\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ fb = obj_to_fb(obj); ++ ++ list_for_each_entry(fbl, &file_priv->fbs, filp_head) ++ if (fb == fbl) ++ found = 1; ++ ++ if (!found) { ++ DRM_ERROR("tried to remove a fb that we didn't own\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* TODO release all crtc connected to the framebuffer */ ++ /* TODO unhock the destructor from the buffer object */ ++ ++ list_del(&fb->filp_head); ++ fb->funcs->destroy(fb); ++ ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++/** ++ * drm_mode_getfb - get FB info ++ * @inode: inode from the ioctl ++ * @filp: file * from the ioctl ++ * @cmd: cmd from ioctl ++ * @arg: arg from ioctl ++ * ++ * LOCKING: ++ * Caller? (FIXME) ++ * ++ * Lookup the FB given its ID and return info about it. ++ * ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. ++ */ ++int drm_mode_getfb(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_fb_cmd *r = data; ++ struct drm_mode_object *obj; ++ struct drm_framebuffer *fb; ++ int ret = 0; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); ++ if (!obj) { ++ DRM_ERROR("invalid framebuffer id\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ fb = obj_to_fb(obj); ++ ++ r->height = fb->height; ++ r->width = fb->width; ++ r->depth = fb->depth; ++ r->bpp = fb->bits_per_pixel; ++ r->pitch = fb->pitch; ++ fb->funcs->create_handle(fb, file_priv, &r->handle); ++ ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++/** ++ * drm_fb_release - remove and free the FBs on this file ++ * @filp: file * from the ioctl ++ * ++ * LOCKING: ++ * Takes mode config lock. ++ * ++ * Destroy all the FBs associated with @filp. ++ * ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. 
++ */ ++void drm_fb_release(struct file *filp) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_framebuffer *fb, *tfb; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { ++ list_del(&fb->filp_head); ++ fb->funcs->destroy(fb); ++ } ++ mutex_unlock(&dev->mode_config.mutex); ++} ++ ++/** ++ * drm_mode_attachmode - add a mode to the user mode list ++ * @dev: DRM device ++ * @connector: connector to add the mode to ++ * @mode: mode to add ++ * ++ * Add @mode to @connector's user mode list. ++ */ ++static int drm_mode_attachmode(struct drm_device *dev, ++ struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ int ret = 0; ++ ++ list_add_tail(&mode->head, &connector->user_modes); ++ return ret; ++} ++ ++int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc, ++ struct drm_display_mode *mode) ++{ ++ struct drm_connector *connector; ++ int ret = 0; ++ struct drm_display_mode *dup_mode; ++ int need_dup = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ if (!connector->encoder) ++ break; ++ if (connector->encoder->crtc == crtc) { ++ if (need_dup) ++ dup_mode = drm_mode_duplicate(dev, mode); ++ else ++ dup_mode = mode; ++ ret = drm_mode_attachmode(dev, connector, dup_mode); ++ if (ret) ++ return ret; ++ need_dup = 1; ++ } ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_mode_attachmode_crtc); ++ ++static int drm_mode_detachmode(struct drm_device *dev, ++ struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ int found = 0; ++ int ret = 0; ++ struct drm_display_mode *match_mode, *t; ++ ++ list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) { ++ if (drm_mode_equal(match_mode, mode)) { ++ list_del(&match_mode->head); ++ drm_mode_destroy(dev, match_mode); ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) ++ ret = -EINVAL; ++ ++ return ret; ++} ++ ++int 
drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode) ++{ ++ struct drm_connector *connector; ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ drm_mode_detachmode(dev, connector, mode); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_mode_detachmode_crtc); ++ ++/** ++ * drm_fb_attachmode - Attach a user mode to an connector ++ * @inode: inode from the ioctl ++ * @filp: file * from the ioctl ++ * @cmd: cmd from ioctl ++ * @arg: arg from ioctl ++ * ++ * This attaches a user specified mode to an connector. ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. ++ */ ++int drm_mode_attachmode_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_mode_cmd *mode_cmd = data; ++ struct drm_connector *connector; ++ struct drm_display_mode *mode; ++ struct drm_mode_object *obj; ++ struct drm_mode_modeinfo *umode = &mode_cmd->mode; ++ int ret = 0; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ ++ obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); ++ if (!obj) { ++ ret = -EINVAL; ++ goto out; ++ } ++ connector = obj_to_connector(obj); ++ ++ mode = drm_mode_create(dev); ++ if (!mode) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ drm_crtc_convert_umode(mode, umode); ++ ++ ret = drm_mode_attachmode(dev, connector, mode); ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++ ++/** ++ * drm_fb_detachmode - Detach a user specified mode from an connector ++ * @inode: inode from the ioctl ++ * @filp: file * from the ioctl ++ * @cmd: cmd from ioctl ++ * @arg: arg from ioctl ++ * ++ * Called by the user via ioctl. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. 
++ */ ++int drm_mode_detachmode_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_object *obj; ++ struct drm_mode_mode_cmd *mode_cmd = data; ++ struct drm_connector *connector; ++ struct drm_display_mode mode; ++ struct drm_mode_modeinfo *umode = &mode_cmd->mode; ++ int ret = 0; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ ++ obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); ++ if (!obj) { ++ ret = -EINVAL; ++ goto out; ++ } ++ connector = obj_to_connector(obj); ++ ++ drm_crtc_convert_umode(&mode, umode); ++ ret = drm_mode_detachmode(dev, connector, &mode); ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++struct drm_property *drm_property_create(struct drm_device *dev, int flags, ++ const char *name, int num_values) ++{ ++ struct drm_property *property = NULL; ++ ++ property = kzalloc(sizeof(struct drm_property), GFP_KERNEL); ++ if (!property) ++ return NULL; ++ ++ if (num_values) { ++ property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL); ++ if (!property->values) ++ goto fail; ++ } ++ ++ drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); ++ property->flags = flags; ++ property->num_values = num_values; ++ INIT_LIST_HEAD(&property->enum_blob_list); ++ ++ if (name) ++ strncpy(property->name, name, DRM_PROP_NAME_LEN); ++ ++ list_add_tail(&property->head, &dev->mode_config.property_list); ++ return property; ++fail: ++ kfree(property); ++ return NULL; ++} ++EXPORT_SYMBOL(drm_property_create); ++ ++int drm_property_add_enum(struct drm_property *property, int index, ++ uint64_t value, const char *name) ++{ ++ struct drm_property_enum *prop_enum; ++ ++ if (!(property->flags & DRM_MODE_PROP_ENUM)) ++ return -EINVAL; ++ ++ if (!list_empty(&property->enum_blob_list)) { ++ list_for_each_entry(prop_enum, &property->enum_blob_list, head) { ++ if (prop_enum->value == value) { ++ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); ++ 
prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; ++ return 0; ++ } ++ } ++ } ++ ++ prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL); ++ if (!prop_enum) ++ return -ENOMEM; ++ ++ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); ++ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; ++ prop_enum->value = value; ++ ++ property->values[index] = value; ++ list_add_tail(&prop_enum->head, &property->enum_blob_list); ++ return 0; ++} ++EXPORT_SYMBOL(drm_property_add_enum); ++ ++void drm_property_destroy(struct drm_device *dev, struct drm_property *property) ++{ ++ struct drm_property_enum *prop_enum, *pt; ++ ++ list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) { ++ list_del(&prop_enum->head); ++ kfree(prop_enum); ++ } ++ ++ if (property->num_values) ++ kfree(property->values); ++ drm_mode_object_put(dev, &property->base); ++ list_del(&property->head); ++ kfree(property); ++} ++EXPORT_SYMBOL(drm_property_destroy); ++ ++int drm_connector_attach_property(struct drm_connector *connector, ++ struct drm_property *property, uint64_t init_val) ++{ ++ int i; ++ ++ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { ++ if (connector->property_ids[i] == 0) { ++ connector->property_ids[i] = property->base.id; ++ connector->property_values[i] = init_val; ++ break; ++ } ++ } ++ ++ if (i == DRM_CONNECTOR_MAX_PROPERTY) ++ return -EINVAL; ++ return 0; ++} ++EXPORT_SYMBOL(drm_connector_attach_property); ++ ++int drm_connector_property_set_value(struct drm_connector *connector, ++ struct drm_property *property, uint64_t value) ++{ ++ int i; ++ ++ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { ++ if (connector->property_ids[i] == property->base.id) { ++ connector->property_values[i] = value; ++ break; ++ } ++ } ++ ++ if (i == DRM_CONNECTOR_MAX_PROPERTY) ++ return -EINVAL; ++ return 0; ++} ++EXPORT_SYMBOL(drm_connector_property_set_value); ++ ++int drm_connector_property_get_value(struct drm_connector *connector, ++ struct drm_property *property, uint64_t *val) 
++{ ++ int i; ++ ++ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { ++ if (connector->property_ids[i] == property->base.id) { ++ *val = connector->property_values[i]; ++ break; ++ } ++ } ++ ++ if (i == DRM_CONNECTOR_MAX_PROPERTY) ++ return -EINVAL; ++ return 0; ++} ++EXPORT_SYMBOL(drm_connector_property_get_value); ++ ++int drm_mode_getproperty_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_object *obj; ++ struct drm_mode_get_property *out_resp = data; ++ struct drm_property *property; ++ int enum_count = 0; ++ int blob_count = 0; ++ int value_count = 0; ++ int ret = 0, i; ++ int copied; ++ struct drm_property_enum *prop_enum; ++ struct drm_mode_property_enum __user *enum_ptr; ++ struct drm_property_blob *prop_blob; ++ uint32_t *blob_id_ptr; ++ uint64_t __user *values_ptr; ++ uint32_t __user *blob_length_ptr; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); ++ if (!obj) { ++ ret = -EINVAL; ++ goto done; ++ } ++ property = obj_to_property(obj); ++ ++ if (property->flags & DRM_MODE_PROP_ENUM) { ++ list_for_each_entry(prop_enum, &property->enum_blob_list, head) ++ enum_count++; ++ } else if (property->flags & DRM_MODE_PROP_BLOB) { ++ list_for_each_entry(prop_blob, &property->enum_blob_list, head) ++ blob_count++; ++ } ++ ++ value_count = property->num_values; ++ ++ strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN); ++ out_resp->name[DRM_PROP_NAME_LEN-1] = 0; ++ out_resp->flags = property->flags; ++ ++ if ((out_resp->count_values >= value_count) && value_count) { ++ values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr; ++ for (i = 0; i < value_count; i++) { ++ if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) { ++ ret = -EFAULT; ++ goto done; ++ } ++ } ++ } ++ out_resp->count_values = value_count; ++ ++ if (property->flags & DRM_MODE_PROP_ENUM) { ++ if ((out_resp->count_enum_blobs >= enum_count) && 
enum_count) { ++ copied = 0; ++ enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr; ++ list_for_each_entry(prop_enum, &property->enum_blob_list, head) { ++ ++ if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) { ++ ret = -EFAULT; ++ goto done; ++ } ++ ++ if (copy_to_user(&enum_ptr[copied].name, ++ &prop_enum->name, DRM_PROP_NAME_LEN)) { ++ ret = -EFAULT; ++ goto done; ++ } ++ copied++; ++ } ++ } ++ out_resp->count_enum_blobs = enum_count; ++ } ++ ++ if (property->flags & DRM_MODE_PROP_BLOB) { ++ if ((out_resp->count_enum_blobs >= blob_count) && blob_count) { ++ copied = 0; ++ blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr; ++ blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr; ++ ++ list_for_each_entry(prop_blob, &property->enum_blob_list, head) { ++ if (put_user(prop_blob->base.id, blob_id_ptr + copied)) { ++ ret = -EFAULT; ++ goto done; ++ } ++ ++ if (put_user(prop_blob->length, blob_length_ptr + copied)) { ++ ret = -EFAULT; ++ goto done; ++ } ++ ++ copied++; ++ } ++ } ++ out_resp->count_enum_blobs = blob_count; ++ } ++done: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length, ++ void *data) ++{ ++ struct drm_property_blob *blob; ++ ++ if (!length || !data) ++ return NULL; ++ ++ blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); ++ if (!blob) ++ return NULL; ++ ++ blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob)); ++ blob->length = length; ++ ++ memcpy(blob->data, data, length); ++ ++ drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB); ++ ++ list_add_tail(&blob->head, &dev->mode_config.property_blob_list); ++ return blob; ++} ++ ++static void drm_property_destroy_blob(struct drm_device *dev, ++ struct drm_property_blob *blob) ++{ ++ drm_mode_object_put(dev, &blob->base); ++ list_del(&blob->head); ++ kfree(blob); ++} ++ 
++int drm_mode_getblob_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_object *obj; ++ struct drm_mode_get_blob *out_resp = data; ++ struct drm_property_blob *blob; ++ int ret = 0; ++ void *blob_ptr; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); ++ if (!obj) { ++ ret = -EINVAL; ++ goto done; ++ } ++ blob = obj_to_blob(obj); ++ ++ if (out_resp->length == blob->length) { ++ blob_ptr = (void *)(unsigned long)out_resp->data; ++ if (copy_to_user(blob_ptr, blob->data, blob->length)){ ++ ret = -EFAULT; ++ goto done; ++ } ++ } ++ out_resp->length = blob->length; ++ ++done: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++int drm_mode_connector_update_edid_property(struct drm_connector *connector, ++ struct edid *edid) ++{ ++ struct drm_device *dev = connector->dev; ++ int ret = 0; ++ ++ if (connector->edid_blob_ptr) ++ drm_property_destroy_blob(dev, connector->edid_blob_ptr); ++ ++ /* Delete edid, when there is none. 
*/ ++ if (!edid) { ++ connector->edid_blob_ptr = NULL; ++ ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0); ++ return ret; ++ } ++ ++ connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid); ++ ++ ret = drm_connector_property_set_value(connector, ++ dev->mode_config.edid_property, ++ connector->edid_blob_ptr->base.id); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_mode_connector_update_edid_property); ++ ++int drm_mode_connector_property_set_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_connector_set_property *out_resp = data; ++ struct drm_mode_object *obj; ++ struct drm_property *property; ++ struct drm_connector *connector; ++ int ret = -EINVAL; ++ int i; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ ++ obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); ++ if (!obj) { ++ goto out; ++ } ++ connector = obj_to_connector(obj); ++ ++ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) { ++ if (connector->property_ids[i] == out_resp->prop_id) ++ break; ++ } ++ ++ if (i == DRM_CONNECTOR_MAX_PROPERTY) { ++ goto out; ++ } ++ ++ obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); ++ if (!obj) { ++ goto out; ++ } ++ property = obj_to_property(obj); ++ ++ if (property->flags & DRM_MODE_PROP_IMMUTABLE) ++ goto out; ++ ++ if (property->flags & DRM_MODE_PROP_RANGE) { ++ if (out_resp->value < property->values[0]) ++ goto out; ++ ++ if (out_resp->value > property->values[1]) ++ goto out; ++ } else { ++ int found = 0; ++ for (i = 0; i < property->num_values; i++) { ++ if (property->values[i] == out_resp->value) { ++ found = 1; ++ break; ++ } ++ } ++ if (!found) { ++ goto out; ++ } ++ } ++ ++ if (connector->funcs->set_property) ++ ret = connector->funcs->set_property(connector, property, out_resp->value); ++ ++ /* store the property value if succesful */ ++ if (!ret) ++ drm_connector_property_set_value(connector, property, 
out_resp->value); ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} ++ ++int drm_mode_connector_attach_encoder(struct drm_connector *connector, ++ struct drm_encoder *encoder) ++{ ++ int i; ++ ++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { ++ if (connector->encoder_ids[i] == 0) { ++ connector->encoder_ids[i] = encoder->base.id; ++ return 0; ++ } ++ } ++ return -ENOMEM; ++} ++EXPORT_SYMBOL(drm_mode_connector_attach_encoder); ++ ++void drm_mode_connector_detach_encoder(struct drm_connector *connector, ++ struct drm_encoder *encoder) ++{ ++ int i; ++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { ++ if (connector->encoder_ids[i] == encoder->base.id) { ++ connector->encoder_ids[i] = 0; ++ if (connector->encoder == encoder) ++ connector->encoder = NULL; ++ break; ++ } ++ } ++} ++EXPORT_SYMBOL(drm_mode_connector_detach_encoder); ++ ++bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, ++ int gamma_size) ++{ ++ crtc->gamma_size = gamma_size; ++ ++ crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL); ++ if (!crtc->gamma_store) { ++ crtc->gamma_size = 0; ++ return false; ++ } ++ ++ return true; ++} ++EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); ++ ++int drm_mode_gamma_set_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_crtc_lut *crtc_lut = data; ++ struct drm_mode_object *obj; ++ struct drm_crtc *crtc; ++ void *r_base, *g_base, *b_base; ++ int size; ++ int ret = 0; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); ++ if (!obj) { ++ ret = -EINVAL; ++ goto out; ++ } ++ crtc = obj_to_crtc(obj); ++ ++ /* memcpy into gamma store */ ++ if (crtc_lut->gamma_size != crtc->gamma_size) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ size = crtc_lut->gamma_size * (sizeof(uint16_t)); ++ r_base = crtc->gamma_store; ++ if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) { ++ ret = -EFAULT; ++ 
goto out; ++ } ++ ++ g_base = r_base + size; ++ if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ b_base = g_base + size; ++ if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); ++ ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++ ++} ++ ++int drm_mode_gamma_get_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_mode_crtc_lut *crtc_lut = data; ++ struct drm_mode_object *obj; ++ struct drm_crtc *crtc; ++ void *r_base, *g_base, *b_base; ++ int size; ++ int ret = 0; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); ++ if (!obj) { ++ ret = -EINVAL; ++ goto out; ++ } ++ crtc = obj_to_crtc(obj); ++ ++ /* memcpy into gamma store */ ++ if (crtc_lut->gamma_size != crtc->gamma_size) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ size = crtc_lut->gamma_size * (sizeof(uint16_t)); ++ r_base = crtc->gamma_store; ++ if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ g_base = r_base + size; ++ if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ b_base = g_base + size; ++ if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++out: ++ mutex_unlock(&dev->mode_config.mutex); ++ return ret; ++} +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c +new file mode 100644 +index 0000000..58e3359 +--- /dev/null ++++ b/drivers/gpu/drm/drm_crtc_helper.c +@@ -0,0 +1,820 @@ ++/* ++ * Copyright (c) 2006-2008 Intel Corporation ++ * Copyright (c) 2007 Dave Airlie ++ * ++ * DRM core CRTC related functions ++ * ++ * Permission to 
use, copy, modify, distribute, and sell this software and its ++ * documentation for any purpose is hereby granted without fee, provided that ++ * the above copyright notice appear in all copies and that both that copyright ++ * notice and this permission notice appear in supporting documentation, and ++ * that the name of the copyright holders not be used in advertising or ++ * publicity pertaining to distribution of the software without specific, ++ * written prior permission. The copyright holders make no representations ++ * about the suitability of this software for any purpose. It is provided "as ++ * is" without express or implied warranty. ++ * ++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, ++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO ++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR ++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, ++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER ++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE ++ * OF THIS SOFTWARE. ++ * ++ * Authors: ++ * Keith Packard ++ * Eric Anholt ++ * Dave Airlie ++ * Jesse Barnes ++ */ ++ ++#include "drmP.h" ++#include "drm_crtc.h" ++#include "drm_crtc_helper.h" ++ ++/* ++ * Detailed mode info for 800x600@60Hz ++ */ ++static struct drm_display_mode std_mode[] = { ++ { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840, ++ 968, 1056, 0, 600, 601, 605, 628, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, ++}; ++ ++/** ++ * drm_helper_probe_connector_modes - get complete set of display modes ++ * @dev: DRM device ++ * @maxX: max width for modes ++ * @maxY: max height for modes ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Based on @dev's mode_config layout, scan all the connectors and try to detect ++ * modes on them. 
Modes will first be added to the connector's probed_modes ++ * list, then culled (based on validity and the @maxX, @maxY parameters) and ++ * put into the normal modes list. ++ * ++ * Intended to be used either at bootup time or when major configuration ++ * changes have occurred. ++ * ++ * FIXME: take into account monitor limits ++ */ ++void drm_helper_probe_single_connector_modes(struct drm_connector *connector, ++ uint32_t maxX, uint32_t maxY) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_display_mode *mode, *t; ++ struct drm_connector_helper_funcs *connector_funcs = ++ connector->helper_private; ++ int ret; ++ ++ DRM_DEBUG("%s\n", drm_get_connector_name(connector)); ++ /* set all modes to the unverified state */ ++ list_for_each_entry_safe(mode, t, &connector->modes, head) ++ mode->status = MODE_UNVERIFIED; ++ ++ connector->status = connector->funcs->detect(connector); ++ ++ if (connector->status == connector_status_disconnected) { ++ DRM_DEBUG("%s is disconnected\n", ++ drm_get_connector_name(connector)); ++ /* TODO set EDID to NULL */ ++ return; ++ } ++ ++ ret = (*connector_funcs->get_modes)(connector); ++ ++ if (ret) { ++ drm_mode_connector_list_update(connector); ++ } ++ ++ if (maxX && maxY) ++ drm_mode_validate_size(dev, &connector->modes, maxX, ++ maxY, 0); ++ list_for_each_entry_safe(mode, t, &connector->modes, head) { ++ if (mode->status == MODE_OK) ++ mode->status = connector_funcs->mode_valid(connector, ++ mode); ++ } ++ ++ ++ drm_mode_prune_invalid(dev, &connector->modes, true); ++ ++ if (list_empty(&connector->modes)) { ++ struct drm_display_mode *stdmode; ++ ++ DRM_DEBUG("No valid modes on %s\n", ++ drm_get_connector_name(connector)); ++ ++ /* Should we do this here ??? ++ * When no valid EDID modes are available we end up ++ * here and bailed in the past, now we add a standard ++ * 640x480@60Hz mode and carry on. 
++ */ ++ stdmode = drm_mode_duplicate(dev, &std_mode[0]); ++ drm_mode_probed_add(connector, stdmode); ++ drm_mode_list_concat(&connector->probed_modes, ++ &connector->modes); ++ ++ DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n", ++ drm_get_connector_name(connector)); ++ } ++ ++ drm_mode_sort(&connector->modes); ++ ++ DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector)); ++ list_for_each_entry_safe(mode, t, &connector->modes, head) { ++ mode->vrefresh = drm_mode_vrefresh(mode); ++ ++ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); ++ drm_mode_debug_printmodeline(mode); ++ } ++} ++EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); ++ ++void drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, ++ uint32_t maxY) ++{ ++ struct drm_connector *connector; ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ drm_helper_probe_single_connector_modes(connector, maxX, maxY); ++ } ++} ++EXPORT_SYMBOL(drm_helper_probe_connector_modes); ++ ++ ++/** ++ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config ++ * @crtc: CRTC to check ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Walk @crtc's DRM device's mode_config and see if it's in use. ++ * ++ * RETURNS: ++ * True if @crtc is part of the mode_config, false otherwise. ++ */ ++bool drm_helper_crtc_in_use(struct drm_crtc *crtc) ++{ ++ struct drm_encoder *encoder; ++ struct drm_device *dev = crtc->dev; ++ /* FIXME: Locking around list access? */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) ++ if (encoder->crtc == crtc) ++ return true; ++ return false; ++} ++EXPORT_SYMBOL(drm_helper_crtc_in_use); ++ ++/** ++ * drm_disable_unused_functions - disable unused objects ++ * @dev: DRM device ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * If an connector or CRTC isn't part of @dev's mode_config, it can be disabled ++ * by calling its dpms function, which should power it off. 
++ */ ++void drm_helper_disable_unused_functions(struct drm_device *dev) ++{ ++ struct drm_encoder *encoder; ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ struct drm_crtc *crtc; ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ encoder_funcs = encoder->helper_private; ++ if (!encoder->crtc) ++ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); ++ } ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; ++ crtc->enabled = drm_helper_crtc_in_use(crtc); ++ if (!crtc->enabled) { ++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); ++ crtc->fb = NULL; ++ } ++ } ++} ++EXPORT_SYMBOL(drm_helper_disable_unused_functions); ++ ++static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height) ++{ ++ struct drm_display_mode *mode; ++ ++ list_for_each_entry(mode, &connector->modes, head) { ++ if (drm_mode_width(mode) > width || ++ drm_mode_height(mode) > height) ++ continue; ++ if (mode->type & DRM_MODE_TYPE_PREFERRED) ++ return mode; ++ } ++ return NULL; ++} ++ ++static bool drm_connector_enabled(struct drm_connector *connector, bool strict) ++{ ++ bool enable; ++ ++ if (strict) { ++ enable = connector->status == connector_status_connected; ++ } else { ++ enable = connector->status != connector_status_disconnected; ++ } ++ return enable; ++} ++ ++static void drm_enable_connectors(struct drm_device *dev, bool *enabled) ++{ ++ bool any_enabled = false; ++ struct drm_connector *connector; ++ int i = 0; ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ enabled[i] = drm_connector_enabled(connector, true); ++ any_enabled |= enabled[i]; ++ i++; ++ } ++ ++ if (any_enabled) ++ return; ++ ++ i = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ enabled[i] = drm_connector_enabled(connector, false); ++ i++; ++ } ++} ++ ++static bool drm_target_preferred(struct drm_device 
*dev, ++ struct drm_display_mode **modes, ++ bool *enabled, int width, int height) ++{ ++ struct drm_connector *connector; ++ int i = 0; ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ ++ if (enabled[i] == false) { ++ i++; ++ continue; ++ } ++ ++ modes[i] = drm_has_preferred_mode(connector, width, height); ++ if (!modes[i]) { ++ list_for_each_entry(modes[i], &connector->modes, head) ++ break; ++ } ++ i++; ++ } ++ return true; ++} ++ ++static int drm_pick_crtcs(struct drm_device *dev, ++ struct drm_crtc **best_crtcs, ++ struct drm_display_mode **modes, ++ int n, int width, int height) ++{ ++ int c, o; ++ struct drm_connector *connector; ++ struct drm_connector_helper_funcs *connector_funcs; ++ struct drm_encoder *encoder; ++ struct drm_crtc *best_crtc; ++ int my_score, best_score, score; ++ struct drm_crtc **crtcs, *crtc; ++ ++ if (n == dev->mode_config.num_connector) ++ return 0; ++ c = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ if (c == n) ++ break; ++ c++; ++ } ++ ++ best_crtcs[n] = NULL; ++ best_crtc = NULL; ++ best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height); ++ if (modes[n] == NULL) ++ return best_score; ++ ++ crtcs = kmalloc(dev->mode_config.num_connector * ++ sizeof(struct drm_crtc *), GFP_KERNEL); ++ if (!crtcs) ++ return best_score; ++ ++ my_score = 1; ++ if (connector->status == connector_status_connected) ++ my_score++; ++ if (drm_has_preferred_mode(connector, width, height)) ++ my_score++; ++ ++ connector_funcs = connector->helper_private; ++ encoder = connector_funcs->best_encoder(connector); ++ if (!encoder) ++ goto out; ++ ++ connector->encoder = encoder; ++ ++ /* select a crtc for this connector and then attempt to configure ++ remaining connectors */ ++ c = 0; ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ ++ if ((connector->encoder->possible_crtcs & (1 << c)) == 0) { ++ c++; ++ continue; ++ } ++ ++ for (o = 0; o < n; o++) ++ if 
(best_crtcs[o] == crtc) ++ break; ++ ++ if (o < n) { ++ /* ignore cloning for now */ ++ c++; ++ continue; ++ } ++ ++ crtcs[n] = crtc; ++ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *)); ++ score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1, ++ width, height); ++ if (score > best_score) { ++ best_crtc = crtc; ++ best_score = score; ++ memcpy(best_crtcs, crtcs, ++ dev->mode_config.num_connector * ++ sizeof(struct drm_crtc *)); ++ } ++ c++; ++ } ++out: ++ kfree(crtcs); ++ return best_score; ++} ++ ++static void drm_setup_crtcs(struct drm_device *dev) ++{ ++ struct drm_crtc **crtcs; ++ struct drm_display_mode **modes; ++ struct drm_encoder *encoder; ++ struct drm_connector *connector; ++ bool *enabled; ++ int width, height; ++ int i, ret; ++ ++ width = dev->mode_config.max_width; ++ height = dev->mode_config.max_height; ++ ++ /* clean out all the encoder/crtc combos */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ encoder->crtc = NULL; ++ } ++ ++ crtcs = kcalloc(dev->mode_config.num_connector, ++ sizeof(struct drm_crtc *), GFP_KERNEL); ++ modes = kcalloc(dev->mode_config.num_connector, ++ sizeof(struct drm_display_mode *), GFP_KERNEL); ++ enabled = kcalloc(dev->mode_config.num_connector, ++ sizeof(bool), GFP_KERNEL); ++ ++ drm_enable_connectors(dev, enabled); ++ ++ ret = drm_target_preferred(dev, modes, enabled, width, height); ++ if (!ret) ++ DRM_ERROR("Unable to find initial modes\n"); ++ ++ drm_pick_crtcs(dev, crtcs, modes, 0, width, height); ++ ++ i = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ struct drm_display_mode *mode = modes[i]; ++ struct drm_crtc *crtc = crtcs[i]; ++ ++ if (connector->encoder == NULL) { ++ i++; ++ continue; ++ } ++ ++ if (mode && crtc) { ++ crtc->desired_mode = mode; ++ connector->encoder->crtc = crtc; ++ } else ++ connector->encoder->crtc = NULL; ++ i++; ++ } ++ ++ kfree(crtcs); ++ kfree(modes); ++ kfree(enabled); ++} ++/** ++ * drm_crtc_set_mode - set a 
mode ++ * @crtc: CRTC to program ++ * @mode: mode to use ++ * @x: width of mode ++ * @y: height of mode ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance ++ * to fixup or reject the mode prior to trying to set it. ++ * ++ * RETURNS: ++ * True if the mode was set successfully, or false otherwise. ++ */ ++bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ int x, int y) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_display_mode *adjusted_mode, saved_mode; ++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ int saved_x, saved_y; ++ struct drm_encoder *encoder; ++ bool ret = true; ++ ++ adjusted_mode = drm_mode_duplicate(dev, mode); ++ ++ crtc->enabled = drm_helper_crtc_in_use(crtc); ++ ++ if (!crtc->enabled) ++ return true; ++ ++ saved_mode = crtc->mode; ++ saved_x = crtc->x; ++ saved_y = crtc->y; ++ ++ /* Update crtc values up front so the driver can rely on them for mode ++ * setting. ++ */ ++ crtc->mode = *mode; ++ crtc->x = x; ++ crtc->y = y; ++ ++ if (drm_mode_equal(&saved_mode, &crtc->mode)) { ++ if (saved_x != crtc->x || saved_y != crtc->y) { ++ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y); ++ goto done; ++ } ++ } ++ ++ /* Pass our mode to the connectors and the CRTC to give them a chance to ++ * adjust it according to limitations or connector properties, and also ++ * a chance to reject the mode entirely. ++ */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ ++ if (encoder->crtc != crtc) ++ continue; ++ encoder_funcs = encoder->helper_private; ++ if (!(ret = encoder_funcs->mode_fixup(encoder, mode, ++ adjusted_mode))) { ++ goto done; ++ } ++ } ++ ++ if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) { ++ goto done; ++ } ++ ++ /* Prepare the encoders and CRTCs before setting the mode. 
*/ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ ++ if (encoder->crtc != crtc) ++ continue; ++ encoder_funcs = encoder->helper_private; ++ /* Disable the encoders as the first thing we do. */ ++ encoder_funcs->prepare(encoder); ++ } ++ ++ crtc_funcs->prepare(crtc); ++ ++ /* Set up the DPLL and any encoders state that needs to adjust or depend ++ * on the DPLL. ++ */ ++ crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y); ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ ++ if (encoder->crtc != crtc) ++ continue; ++ ++ DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder), ++ mode->name, mode->base.id); ++ encoder_funcs = encoder->helper_private; ++ encoder_funcs->mode_set(encoder, mode, adjusted_mode); ++ } ++ ++ /* Now enable the clocks, plane, pipe, and connectors that we set up. */ ++ crtc_funcs->commit(crtc); ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ ++ if (encoder->crtc != crtc) ++ continue; ++ ++ encoder_funcs = encoder->helper_private; ++ encoder_funcs->commit(encoder); ++ ++ } ++ ++ /* XXX free adjustedmode */ ++ drm_mode_destroy(dev, adjusted_mode); ++ /* FIXME: add subpixel order */ ++done: ++ if (!ret) { ++ crtc->mode = saved_mode; ++ crtc->x = saved_x; ++ crtc->y = saved_y; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_crtc_helper_set_mode); ++ ++ ++/** ++ * drm_crtc_helper_set_config - set a new config from userspace ++ * @crtc: CRTC to setup ++ * @crtc_info: user provided configuration ++ * @new_mode: new mode to set ++ * @connector_set: set of connectors for the new config ++ * @fb: new framebuffer ++ * ++ * LOCKING: ++ * Caller must hold mode config lock. ++ * ++ * Setup a new configuration, provided by the user in @crtc_info, and enable ++ * it. ++ * ++ * RETURNS: ++ * Zero. 
(FIXME) ++ */ ++int drm_crtc_helper_set_config(struct drm_mode_set *set) ++{ ++ struct drm_device *dev; ++ struct drm_crtc **save_crtcs, *new_crtc; ++ struct drm_encoder **save_encoders, *new_encoder; ++ bool save_enabled; ++ bool changed = false; ++ bool flip_or_move = false; ++ struct drm_connector *connector; ++ int count = 0, ro, fail = 0; ++ struct drm_crtc_helper_funcs *crtc_funcs; ++ int ret = 0; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!set) ++ return -EINVAL; ++ ++ if (!set->crtc) ++ return -EINVAL; ++ ++ if (!set->crtc->helper_private) ++ return -EINVAL; ++ ++ crtc_funcs = set->crtc->helper_private; ++ ++ DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %d (x, y) (%i, %i)\n", ++ set->crtc, set->crtc->base.id, set->fb, set->connectors, ++ (int)set->num_connectors, set->x, set->y); ++ ++ dev = set->crtc->dev; ++ ++ /* save previous config */ ++ save_enabled = set->crtc->enabled; ++ ++ /* this is meant to be num_connector not num_crtc */ ++ save_crtcs = kzalloc(dev->mode_config.num_connector * ++ sizeof(struct drm_crtc *), GFP_KERNEL); ++ if (!save_crtcs) ++ return -ENOMEM; ++ ++ save_encoders = kzalloc(dev->mode_config.num_connector * ++ sizeof(struct drm_encoder *), GFP_KERNEL); ++ if (!save_encoders) { ++ kfree(save_crtcs); ++ return -ENOMEM; ++ } ++ ++ /* We should be able to check here if the fb has the same properties ++ * and then just flip_or_move it */ ++ if (set->crtc->fb != set->fb) { ++ /* if we have no fb then its a change not a flip */ ++ if (set->crtc->fb == NULL) ++ changed = true; ++ else ++ flip_or_move = true; ++ } ++ ++ if (set->x != set->crtc->x || set->y != set->crtc->y) ++ flip_or_move = true; ++ ++ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { ++ DRM_DEBUG("modes are different\n"); ++ drm_mode_debug_printmodeline(&set->crtc->mode); ++ drm_mode_debug_printmodeline(set->mode); ++ changed = true; ++ } ++ ++ /* a) traverse passed in connector list and get encoders for them */ ++ count = 0; ++
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ struct drm_connector_helper_funcs *connector_funcs = ++ connector->helper_private; ++ save_encoders[count++] = connector->encoder; ++ new_encoder = connector->encoder; ++ for (ro = 0; ro < set->num_connectors; ro++) { ++ if (set->connectors[ro] == connector) { ++ new_encoder = connector_funcs->best_encoder(connector); ++ /* if we can't get an encoder for a connector ++ we are setting now - then fail */ ++ if (new_encoder == NULL) ++ /* don't break so fail path works correct */ ++ fail = 1; ++ break; ++ } ++ } ++ ++ if (new_encoder != connector->encoder) { ++ changed = true; ++ connector->encoder = new_encoder; ++ } ++ } ++ ++ if (fail) { ++ ret = -EINVAL; ++ goto fail_no_encoder; ++ } ++ ++ count = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ if (!connector->encoder) ++ continue; ++ ++ save_crtcs[count++] = connector->encoder->crtc; ++ ++ if (connector->encoder->crtc == set->crtc) ++ new_crtc = NULL; ++ else ++ new_crtc = connector->encoder->crtc; ++ ++ for (ro = 0; ro < set->num_connectors; ro++) { ++ if (set->connectors[ro] == connector) ++ new_crtc = set->crtc; ++ } ++ if (new_crtc != connector->encoder->crtc) { ++ changed = true; ++ connector->encoder->crtc = new_crtc; ++ } ++ } ++ ++ /* mode_set_base is not a required function */ ++ if (flip_or_move && !crtc_funcs->mode_set_base) ++ changed = true; ++ ++ if (changed) { ++ set->crtc->fb = set->fb; ++ set->crtc->enabled = (set->mode != NULL); ++ if (set->mode != NULL) { ++ DRM_DEBUG("attempting to set mode from userspace\n"); ++ drm_mode_debug_printmodeline(set->mode); ++ if (!drm_crtc_helper_set_mode(set->crtc, set->mode, ++ set->x, set->y)) { ++ ret = -EINVAL; ++ goto fail_set_mode; ++ } ++ /* TODO are these needed? 
*/ ++ set->crtc->desired_x = set->x; ++ set->crtc->desired_y = set->y; ++ set->crtc->desired_mode = set->mode; ++ } ++ drm_helper_disable_unused_functions(dev); ++ } else if (flip_or_move) { ++ if (set->crtc->fb != set->fb) ++ set->crtc->fb = set->fb; ++ crtc_funcs->mode_set_base(set->crtc, set->x, set->y); ++ } ++ ++ kfree(save_encoders); ++ kfree(save_crtcs); ++ return 0; ++ ++fail_set_mode: ++ set->crtc->enabled = save_enabled; ++ count = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) ++ connector->encoder->crtc = save_crtcs[count++]; ++fail_no_encoder: ++ kfree(save_crtcs); ++ count = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ connector->encoder = save_encoders[count++]; ++ } ++ kfree(save_encoders); ++ return ret; ++} ++EXPORT_SYMBOL(drm_crtc_helper_set_config); ++ ++bool drm_helper_plugged_event(struct drm_device *dev) ++{ ++ DRM_DEBUG("\n"); ++ ++ drm_helper_probe_connector_modes(dev, dev->mode_config.max_width, ++ dev->mode_config.max_height); ++ ++ drm_setup_crtcs(dev); ++ ++ /* alert the driver fb layer */ ++ dev->mode_config.funcs->fb_changed(dev); ++ ++ /* FIXME: send hotplug event */ ++ return true; ++} ++/** ++ * drm_initial_config - setup a sane initial connector configuration ++ * @dev: DRM device ++ * @can_grow: this configuration is growable ++ * ++ * LOCKING: ++ * Called at init time, must take mode config lock. ++ * ++ * Scan the CRTCs and connectors and try to put together an initial setup. ++ * At the moment, this is a cloned configuration across all heads with ++ * a new framebuffer object as the backing store. ++ * ++ * RETURNS: ++ * Zero if everything went ok, nonzero otherwise. 
++ */ ++bool drm_helper_initial_config(struct drm_device *dev, bool can_grow) ++{ ++ int ret = false; ++ ++ drm_helper_plugged_event(dev); ++ return ret; ++} ++EXPORT_SYMBOL(drm_helper_initial_config); ++ ++/** ++ * drm_hotplug_stage_two ++ * @dev DRM device ++ * @connector hotpluged connector ++ * ++ * LOCKING. ++ * Caller must hold mode config lock, function might grab struct lock. ++ * ++ * Stage two of a hotplug. ++ * ++ * RETURNS: ++ * Zero on success, errno on failure. ++ */ ++int drm_helper_hotplug_stage_two(struct drm_device *dev) ++{ ++ drm_helper_plugged_event(dev); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_helper_hotplug_stage_two); ++ ++int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, ++ struct drm_mode_fb_cmd *mode_cmd) ++{ ++ fb->width = mode_cmd->width; ++ fb->height = mode_cmd->height; ++ fb->pitch = mode_cmd->pitch; ++ fb->bits_per_pixel = mode_cmd->bpp; ++ fb->depth = mode_cmd->depth; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); ++ ++int drm_helper_resume_force_mode(struct drm_device *dev) ++{ ++ struct drm_crtc *crtc; ++ int ret; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ ++ if (!crtc->enabled) ++ continue; ++ ++ ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, ++ crtc->y); ++ ++ if (ret == false) ++ DRM_ERROR("failed to set mode on crtc %p\n", crtc); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_helper_resume_force_mode); +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c +index 996097a..373e3de 100644 +--- a/drivers/gpu/drm/drm_drv.c ++++ b/drivers/gpu/drm/drm_drv.c +@@ -74,6 +74,9 @@ static struct drm_ioctl_desc drm_ioctls[] = { + DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), + ++ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), ++ + 
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), +@@ -123,6 +126,23 @@ static struct drm_ioctl_desc drm_ioctls[] = { + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, 
DRM_MASTER|DRM_CONTROL_ALLOW), + }; + + #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) +@@ -138,8 +158,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { + */ + int drm_lastclose(struct drm_device * dev) + { +- struct drm_magic_entry *pt, *next; +- struct drm_map_list *r_list, *list_t; + struct drm_vma_entry *vma, *vma_temp; + int i; + +@@ -149,13 +167,7 @@ int drm_lastclose(struct drm_device * dev) + dev->driver->lastclose(dev); + DRM_DEBUG("driver lastclose completed\n"); + +- if (dev->unique) { +- drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); +- dev->unique = NULL; +- dev->unique_len = 0; +- } +- +- if (dev->irq_enabled) ++ if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET)) + drm_irq_uninstall(dev); + + mutex_lock(&dev->struct_mutex); +@@ -164,18 +176,9 @@ int drm_lastclose(struct drm_device * dev) + drm_drawable_free_all(dev); + del_timer(&dev->timer); + +- /* Clear pid list */ +- if (dev->magicfree.next) { +- list_for_each_entry_safe(pt, next, &dev->magicfree, head) { +- list_del(&pt->head); +- drm_ht_remove_item(&dev->magiclist, &pt->hash_item); +- drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); +- } +- drm_ht_remove(&dev->magiclist); +- } +- + /* Clear AGP information */ +- if (drm_core_has_AGP(dev) && dev->agp) { ++ if (drm_core_has_AGP(dev) && dev->agp && ++ !drm_core_check_feature(dev, DRIVER_MODESET)) { + struct drm_agp_mem *entry, *tempe; + + /* Remove AGP resources, but leave dev->agp +@@ -194,7 +197,8 @@ int drm_lastclose(struct drm_device * dev) + dev->agp->acquired = 0; + dev->agp->enabled = 0; + } +- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) { ++ if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && ++ !drm_core_check_feature(dev, DRIVER_MODESET)) { + drm_sg_cleanup(dev->sg); + dev->sg = NULL; + } +@@ -205,13 +209,6 @@ int drm_lastclose(struct drm_device * dev) + drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + } + +- list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { +- if 
(!(r_list->map->flags & _DRM_DRIVER)) { +- drm_rmmap_locked(dev, r_list->map); +- r_list = NULL; +- } +- } +- + if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { + for (i = 0; i < dev->queue_count; i++) { + if (dev->queuelist[i]) { +@@ -228,14 +225,11 @@ int drm_lastclose(struct drm_device * dev) + } + dev->queue_count = 0; + +- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && ++ !drm_core_check_feature(dev, DRIVER_MODESET)) + drm_dma_takedown(dev); + +- if (dev->lock.hw_lock) { +- dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ +- dev->lock.file_priv = NULL; +- wake_up_interruptible(&dev->lock.lock_queue); +- } ++ dev->dev_mapping = NULL; + mutex_unlock(&dev->struct_mutex); + + DRM_DEBUG("lastclose completed\n"); +@@ -263,6 +257,8 @@ int drm_init(struct drm_driver *driver) + + DRM_DEBUG("\n"); + ++ INIT_LIST_HEAD(&driver->device_list); ++ + for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { + pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; + +@@ -298,6 +294,8 @@ EXPORT_SYMBOL(drm_init); + */ + static void drm_cleanup(struct drm_device * dev) + { ++ struct drm_driver *driver = dev->driver; ++ + DRM_DEBUG("\n"); + + if (!dev) { +@@ -329,35 +327,24 @@ static void drm_cleanup(struct drm_device * dev) + drm_ht_remove(&dev->map_hash); + drm_ctxbitmap_cleanup(dev); + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ drm_put_minor(&dev->control); ++ ++ if (driver->driver_features & DRIVER_GEM) ++ drm_gem_destroy(dev); ++ + drm_put_minor(&dev->primary); + if (drm_put_dev(dev)) + DRM_ERROR("Cannot unload module\n"); + } + +-static int drm_minors_cleanup(int id, void *ptr, void *data) +-{ +- struct drm_minor *minor = ptr; +- struct drm_device *dev; +- struct drm_driver *driver = data; +- +- dev = minor->dev; +- if (minor->dev->driver != driver) +- return 0; +- +- if (minor->type != DRM_MINOR_LEGACY) +- return 0; +- +- if (dev) +- 
pci_dev_put(dev->pdev); +- drm_cleanup(dev); +- return 1; +-} +- + void drm_exit(struct drm_driver *driver) + { ++ struct drm_device *dev, *tmp; + DRM_DEBUG("\n"); + +- idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver); ++ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item) ++ drm_cleanup(dev); + + DRM_INFO("Module unloaded\n"); + } +@@ -503,7 +490,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, + retcode = -EINVAL; + } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || + ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || +- ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { ++ ((ioctl->flags & DRM_MASTER) && !file_priv->is_master)) { + retcode = -EACCES; + } else { + if (cmd & (IOC_IN | IOC_OUT)) { +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +new file mode 100644 +index 0000000..0fbb0da +--- /dev/null ++++ b/drivers/gpu/drm/drm_edid.c +@@ -0,0 +1,732 @@ ++/* ++ * Copyright (c) 2006 Luc Verhaegen (quirks list) ++ * Copyright (c) 2007-2008 Intel Corporation ++ * Jesse Barnes ++ * ++ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from ++ * FB layer. ++ * Copyright (C) 2006 Dennis Munsie ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++#include ++#include ++#include ++#include "drmP.h" ++#include "drm_edid.h" ++ ++/* ++ * TODO: ++ * - support EDID 1.4 (incl. CE blocks) ++ */ ++ ++/* ++ * EDID blocks out in the wild have a variety of bugs, try to collect ++ * them here (note that userspace may work around broken monitors first, ++ * but fixes should make their way here so that the kernel "just works" ++ * on as many displays as possible). ++ */ ++ ++/* First detailed mode wrong, use largest 60Hz mode */ ++#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0) ++/* Reported 135MHz pixel clock is too high, needs adjustment */ ++#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1) ++/* Prefer the largest mode at 75 Hz */ ++#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2) ++/* Detail timing is in cm not mm */ ++#define EDID_QUIRK_DETAILED_IN_CM (1 << 3) ++/* Detailed timing descriptors have bogus size values, so just take the ++ * maximum size and use that. ++ */ ++#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4) ++/* Monitor forgot to set the first detailed is preferred bit. 
*/ ++#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) ++/* use +hsync +vsync for detailed mode */ ++#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) ++ ++static struct edid_quirk { ++ char *vendor; ++ int product_id; ++ u32 quirks; ++} edid_quirk_list[] = { ++ /* Acer AL1706 */ ++ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, ++ /* Acer F51 */ ++ { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 }, ++ /* Unknown Acer */ ++ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, ++ ++ /* Belinea 10 15 55 */ ++ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, ++ { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, ++ ++ /* Envision Peripherals, Inc. EN-7100e */ ++ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, ++ ++ /* Funai Electronics PM36B */ ++ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | ++ EDID_QUIRK_DETAILED_IN_CM }, ++ ++ /* LG Philips LCD LP154W01-A5 */ ++ { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, ++ { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, ++ ++ /* Philips 107p5 CRT */ ++ { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, ++ ++ /* Proview AY765C */ ++ { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, ++ ++ /* Samsung SyncMaster 205BW. Note: irony */ ++ { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP }, ++ /* Samsung SyncMaster 22[5-6]BW */ ++ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, ++ { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, ++}; ++ ++ ++/* Valid EDID header has these bytes */ ++static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; ++ ++/** ++ * edid_is_valid - sanity check EDID data ++ * @edid: EDID data ++ * ++ * Sanity check the EDID block by looking at the header, the version number ++ * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's ++ * valid. 
++ */ ++static bool edid_is_valid(struct edid *edid) ++{ ++ int i; ++ u8 csum = 0; ++ u8 *raw_edid = (u8 *)edid; ++ ++ if (memcmp(edid->header, edid_header, sizeof(edid_header))) ++ goto bad; ++ if (edid->version != 1) { ++ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); ++ goto bad; ++ } ++ if (edid->revision <= 0 || edid->revision > 3) { ++ DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision); ++ goto bad; ++ } ++ ++ for (i = 0; i < EDID_LENGTH; i++) ++ csum += raw_edid[i]; ++ if (csum) { ++ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); ++ goto bad; ++ } ++ ++ return 1; ++ ++bad: ++ if (raw_edid) { ++ DRM_ERROR("Raw EDID:\n"); ++ print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH); ++ printk("\n"); ++ } ++ return 0; ++} ++ ++/** ++ * edid_vendor - match a string against EDID's obfuscated vendor field ++ * @edid: EDID to match ++ * @vendor: vendor string ++ * ++ * Returns true if @vendor is in @edid, false otherwise ++ */ ++static bool edid_vendor(struct edid *edid, char *vendor) ++{ ++ char edid_vendor[3]; ++ ++ edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@'; ++ edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) | ++ ((edid->mfg_id[1] & 0xe0) >> 5)) + '@'; ++ edid_vendor[2] = (edid->mfg_id[2] & 0x1f) + '@'; ++ ++ return !strncmp(edid_vendor, vendor, 3); ++} ++ ++/** ++ * edid_get_quirks - return quirk flags for a given EDID ++ * @edid: EDID to process ++ * ++ * This tells subsequent routines what fixes they need to apply. 
++ */ ++static u32 edid_get_quirks(struct edid *edid) ++{ ++ struct edid_quirk *quirk; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) { ++ quirk = &edid_quirk_list[i]; ++ ++ if (edid_vendor(edid, quirk->vendor) && ++ (EDID_PRODUCT_ID(edid) == quirk->product_id)) ++ return quirk->quirks; ++ } ++ ++ return 0; ++} ++ ++#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay) ++#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh)) ++ ++ ++/** ++ * edid_fixup_preferred - set preferred modes based on quirk list ++ * @connector: has mode list to fix up ++ * @quirks: quirks list ++ * ++ * Walk the mode list for @connector, clearing the preferred status ++ * on existing modes and setting it anew for the right mode ala @quirks. ++ */ ++static void edid_fixup_preferred(struct drm_connector *connector, ++ u32 quirks) ++{ ++ struct drm_display_mode *t, *cur_mode, *preferred_mode; ++ int target_refresh = 0; ++ ++ if (list_empty(&connector->probed_modes)) ++ return; ++ ++ if (quirks & EDID_QUIRK_PREFER_LARGE_60) ++ target_refresh = 60; ++ if (quirks & EDID_QUIRK_PREFER_LARGE_75) ++ target_refresh = 75; ++ ++ preferred_mode = list_first_entry(&connector->probed_modes, ++ struct drm_display_mode, head); ++ ++ list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) { ++ cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED; ++ ++ if (cur_mode == preferred_mode) ++ continue; ++ ++ /* Largest mode is preferred */ ++ if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) ++ preferred_mode = cur_mode; ++ ++ /* At a given size, try to get closest to target refresh */ ++ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && ++ MODE_REFRESH_DIFF(cur_mode, target_refresh) < ++ MODE_REFRESH_DIFF(preferred_mode, target_refresh)) { ++ preferred_mode = cur_mode; ++ } ++ } ++ ++ preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; ++} ++ ++/** ++ * drm_mode_std - convert standard mode info (width, height, refresh) into mode ++ * @t: standard timing params ++ * ++ * 
Take the standard timing params (in this case width, aspect, and refresh) ++ * and convert them into a real mode using CVT. ++ * ++ * Punts for now, but should eventually use the FB layer's CVT based mode ++ * generation code. ++ */ ++struct drm_display_mode *drm_mode_std(struct drm_device *dev, ++ struct std_timing *t) ++{ ++ struct drm_display_mode *mode; ++ int hsize = t->hsize * 8 + 248, vsize; ++ ++ mode = drm_mode_create(dev); ++ if (!mode) ++ return NULL; ++ ++ if (t->aspect_ratio == 0) ++ vsize = (hsize * 10) / 16; ++ else if (t->aspect_ratio == 1) ++ vsize = (hsize * 3) / 4; ++ else if (t->aspect_ratio == 2) ++ vsize = (hsize * 4) / 5; ++ else ++ vsize = (hsize * 9) / 16; ++ ++ drm_mode_set_name(mode); ++ ++ return mode; ++} ++ ++/** ++ * drm_mode_detailed - create a new mode from an EDID detailed timing section ++ * @dev: DRM device (needed to create new mode) ++ * @edid: EDID block ++ * @timing: EDID detailed timing info ++ * @quirks: quirks to apply ++ * ++ * An EDID detailed timing block contains enough info for us to create and ++ * return a new struct drm_display_mode. 
++ */ ++static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, ++ struct edid *edid, ++ struct detailed_timing *timing, ++ u32 quirks) ++{ ++ struct drm_display_mode *mode; ++ struct detailed_pixel_timing *pt = &timing->data.pixel_data; ++ ++ if (pt->stereo) { ++ printk(KERN_WARNING "stereo mode not supported\n"); ++ return NULL; ++ } ++ if (!pt->separate_sync) { ++ printk(KERN_WARNING "integrated sync not supported\n"); ++ return NULL; ++ } ++ ++ mode = drm_mode_create(dev); ++ if (!mode) ++ return NULL; ++ ++ mode->type = DRM_MODE_TYPE_DRIVER; ++ ++ if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) ++ timing->pixel_clock = 1088; ++ ++ mode->clock = timing->pixel_clock * 10; ++ ++ mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo; ++ mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) | ++ pt->hsync_offset_lo); ++ mode->hsync_end = mode->hsync_start + ++ ((pt->hsync_pulse_width_hi << 8) | ++ pt->hsync_pulse_width_lo); ++ mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo); ++ ++ mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo; ++ mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) | ++ pt->vsync_offset_lo); ++ mode->vsync_end = mode->vsync_start + ++ ((pt->vsync_pulse_width_hi << 8) | ++ pt->vsync_pulse_width_lo); ++ mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo); ++ ++ drm_mode_set_name(mode); ++ ++ if (pt->interlaced) ++ mode->flags |= DRM_MODE_FLAG_INTERLACE; ++ ++ if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { ++ pt->hsync_positive = 1; ++ pt->vsync_positive = 1; ++ } ++ ++ mode->flags |= pt->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; ++ mode->flags |= pt->vsync_positive ? 
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; ++ ++ mode->width_mm = pt->width_mm_lo | (pt->width_mm_hi << 8); ++ mode->height_mm = pt->height_mm_lo | (pt->height_mm_hi << 8); ++ ++ if (quirks & EDID_QUIRK_DETAILED_IN_CM) { ++ mode->width_mm *= 10; ++ mode->height_mm *= 10; ++ } ++ ++ if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) { ++ mode->width_mm = edid->width_cm * 10; ++ mode->height_mm = edid->height_cm * 10; ++ } ++ ++ return mode; ++} ++ ++/* ++ * Detailed mode info for the EDID "established modes" data to use. ++ */ ++static struct drm_display_mode edid_est_modes[] = { ++ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, ++ 968, 1056, 0, 600, 601, 605, 628, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ ++ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, ++ 896, 1024, 0, 600, 601, 603, 625, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */ ++ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, ++ 720, 840, 0, 480, 481, 484, 500, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ ++ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, ++ 704, 832, 0, 480, 489, 491, 520, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ ++ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, ++ 768, 864, 0, 480, 483, 486, 525, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ ++ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656, ++ 752, 800, 0, 480, 490, 492, 525, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */ ++ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738, ++ 846, 900, 0, 400, 421, 423, 449, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */ ++ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738, ++ 846, 900, 0, 400, 412, 414, 449, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */ ++ { 
DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, ++ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ ++ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040, ++ 1136, 1312, 0, 768, 769, 772, 800, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */ ++ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, ++ 1184, 1328, 0, 768, 771, 777, 806, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ ++ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, ++ 1184, 1344, 0, 768, 771, 777, 806, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ ++ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, ++ 1208, 1264, 0, 768, 768, 776, 817, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ ++ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, ++ 928, 1152, 0, 624, 625, 628, 667, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */ ++ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, ++ 896, 1056, 0, 600, 601, 604, 625, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ ++ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, ++ 976, 1040, 0, 600, 637, 643, 666, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ ++ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, ++ 1344, 1600, 0, 864, 865, 868, 900, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ ++}; ++ ++#define EDID_EST_TIMINGS 16 ++#define EDID_STD_TIMINGS 8 ++#define EDID_DETAILED_TIMINGS 4 ++ ++/** ++ * add_established_modes - get est. modes from EDID and add them ++ * @edid: EDID block to scan ++ * ++ * Each EDID block contains a bitmap of the supported "established modes" list ++ * (defined above). Tease them out and add them to the global modes list. 
++ */ ++static int add_established_modes(struct drm_connector *connector, struct edid *edid) ++{ ++ struct drm_device *dev = connector->dev; ++ unsigned long est_bits = edid->established_timings.t1 | ++ (edid->established_timings.t2 << 8) | ++ ((edid->established_timings.mfg_rsvd & 0x80) << 9); ++ int i, modes = 0; ++ ++ for (i = 0; i <= EDID_EST_TIMINGS; i++) ++ if (est_bits & (1<<i)) { ++ struct drm_display_mode *newmode; ++ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]); ++ if (newmode) { ++ drm_mode_probed_add(connector, newmode); ++ modes++; ++ } ++ } ++ ++ return modes; ++} ++ ++/** ++ * add_standard_modes - get std. modes from EDID and add them ++ * @edid: EDID block to scan ++ * ++ * Standard modes can be calculated from the CVT standard. Grab them from ++ * @edid and add them to the list. ++ */ ++static int add_standard_modes(struct drm_connector *connector, struct edid *edid) ++{ ++ struct drm_device *dev = connector->dev; ++ int i, modes = 0; ++ ++ for (i = 0; i < EDID_STD_TIMINGS; i++) { ++ struct std_timing *t = &edid->standard_timings[i]; ++ struct drm_display_mode *newmode; ++ ++ /* If std timings bytes are 1, 1 it's empty */ ++ if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1) ++ continue; ++ ++ newmode = drm_mode_std(dev, &edid->standard_timings[i]); ++ if (newmode) { ++ drm_mode_probed_add(connector, newmode); ++ modes++; ++ } ++ } ++ ++ return modes; ++} ++ ++/** ++ * add_detailed_modes - get detailed mode info from EDID data ++ * @connector: attached connector ++ * @edid: EDID block to scan ++ * @quirks: quirks to apply ++ * ++ * Some of the detailed timing sections may contain mode information. Grab ++ * it and add it to the list. 
++ */ ++static int add_detailed_info(struct drm_connector *connector, ++ struct edid *edid, u32 quirks) ++{ ++ struct drm_device *dev = connector->dev; ++ int i, j, modes = 0; ++ ++ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { ++ struct detailed_timing *timing = &edid->detailed_timings[i]; ++ struct detailed_non_pixel *data = &timing->data.other_data; ++ struct drm_display_mode *newmode; ++ ++ /* EDID up to and including 1.2 may put monitor info here */ ++ if (edid->version == 1 && edid->revision < 3) ++ continue; ++ ++ /* Detailed mode timing */ ++ if (timing->pixel_clock) { ++ newmode = drm_mode_detailed(dev, edid, timing, quirks); ++ if (!newmode) ++ continue; ++ ++ /* First detailed mode is preferred */ ++ if (i == 0 && edid->preferred_timing) ++ newmode->type |= DRM_MODE_TYPE_PREFERRED; ++ drm_mode_probed_add(connector, newmode); ++ ++ modes++; ++ continue; ++ } ++ ++ /* Other timing or info */ ++ switch (data->type) { ++ case EDID_DETAIL_MONITOR_SERIAL: ++ break; ++ case EDID_DETAIL_MONITOR_STRING: ++ break; ++ case EDID_DETAIL_MONITOR_RANGE: ++ /* Get monitor range data */ ++ break; ++ case EDID_DETAIL_MONITOR_NAME: ++ break; ++ case EDID_DETAIL_MONITOR_CPDATA: ++ break; ++ case EDID_DETAIL_STD_MODES: ++ /* Five modes per detailed section */ ++ for (j = 0; j < 5; j++) { ++ struct std_timing *std; ++ struct drm_display_mode *newmode; ++ ++ std = &data->data.timings[j]; ++ newmode = drm_mode_std(dev, std); ++ if (newmode) { ++ drm_mode_probed_add(connector, newmode); ++ modes++; ++ } ++ } ++ break; ++ default: ++ break; ++ } ++ } ++ ++ return modes; ++} ++ ++#define DDC_ADDR 0x50 ++ ++unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter) ++{ ++ unsigned char start = 0x0; ++ unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL); ++ struct i2c_msg msgs[] = { ++ { ++ .addr = DDC_ADDR, ++ .flags = 0, ++ .len = 1, ++ .buf = &start, ++ }, { ++ .addr = DDC_ADDR, ++ .flags = I2C_M_RD, ++ .len = EDID_LENGTH, ++ .buf = buf, ++ } ++ }; ++ ++ if (!buf) { ++ 
dev_warn(&adapter->dev, "unable to allocate memory for EDID " ++ "block.\n"); ++ return NULL; ++ } ++ ++ if (i2c_transfer(adapter, msgs, 2) == 2) ++ return buf; ++ ++ dev_info(&adapter->dev, "unable to read EDID block.\n"); ++ kfree(buf); ++ return NULL; ++} ++EXPORT_SYMBOL(drm_do_probe_ddc_edid); ++ ++static unsigned char *drm_ddc_read(struct i2c_adapter *adapter) ++{ ++ struct i2c_algo_bit_data *algo_data = adapter->algo_data; ++ unsigned char *edid = NULL; ++ int i, j; ++ ++ algo_data->setscl(algo_data->data, 1); ++ ++ for (i = 0; i < 1; i++) { ++ /* For some old monitors we need the ++ * following process to initialize/stop DDC ++ */ ++ algo_data->setsda(algo_data->data, 1); ++ msleep(13); ++ ++ algo_data->setscl(algo_data->data, 1); ++ for (j = 0; j < 5; j++) { ++ msleep(10); ++ if (algo_data->getscl(algo_data->data)) ++ break; ++ } ++ if (j == 5) ++ continue; ++ ++ algo_data->setsda(algo_data->data, 0); ++ msleep(15); ++ algo_data->setscl(algo_data->data, 0); ++ msleep(15); ++ algo_data->setsda(algo_data->data, 1); ++ msleep(15); ++ ++ /* Do the real work */ ++ edid = drm_do_probe_ddc_edid(adapter); ++ algo_data->setsda(algo_data->data, 0); ++ algo_data->setscl(algo_data->data, 0); ++ msleep(15); ++ ++ algo_data->setscl(algo_data->data, 1); ++ for (j = 0; j < 10; j++) { ++ msleep(10); ++ if (algo_data->getscl(algo_data->data)) ++ break; ++ } ++ ++ algo_data->setsda(algo_data->data, 1); ++ msleep(15); ++ algo_data->setscl(algo_data->data, 0); ++ algo_data->setsda(algo_data->data, 0); ++ if (edid) ++ break; ++ } ++ /* Release the DDC lines when done or the Apple Cinema HD display ++ * will switch off ++ */ ++ algo_data->setsda(algo_data->data, 1); ++ algo_data->setscl(algo_data->data, 1); ++ ++ return edid; ++} ++ ++/** ++ * drm_get_edid - get EDID data, if available ++ * @connector: connector we're probing ++ * @adapter: i2c adapter to use for DDC ++ * ++ * Poke the given connector's i2c channel to grab EDID data if possible. 
++ * ++ * Return edid data or NULL if we couldn't find any. ++ */ ++struct edid *drm_get_edid(struct drm_connector *connector, ++ struct i2c_adapter *adapter) ++{ ++ struct edid *edid; ++ ++ edid = (struct edid *)drm_ddc_read(adapter); ++ if (!edid) { ++ dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n", ++ drm_get_connector_name(connector)); ++ return NULL; ++ } ++ if (!edid_is_valid(edid)) { ++ dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", ++ drm_get_connector_name(connector)); ++ kfree(edid); ++ return NULL; ++ } ++ ++ connector->display_info.raw_edid = (char *)edid; ++ ++ return edid; ++} ++EXPORT_SYMBOL(drm_get_edid); ++ ++/** ++ * drm_add_edid_modes - add modes from EDID data, if available ++ * @connector: connector we're probing ++ * @edid: edid data ++ * ++ * Add the specified modes to the connector's mode list. ++ * ++ * Return number of modes added or 0 if we couldn't find any. ++ */ ++int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) ++{ ++ int num_modes = 0; ++ u32 quirks; ++ ++ if (edid == NULL) { ++ return 0; ++ } ++ if (!edid_is_valid(edid)) { ++ dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", ++ drm_get_connector_name(connector)); ++ return 0; ++ } ++ ++ quirks = edid_get_quirks(edid); ++ ++ num_modes += add_established_modes(connector, edid); ++ num_modes += add_standard_modes(connector, edid); ++ num_modes += add_detailed_info(connector, edid, quirks); ++ ++ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) ++ edid_fixup_preferred(connector, quirks); ++ ++ connector->display_info.serration_vsync = edid->serration_vsync; ++ connector->display_info.sync_on_green = edid->sync_on_green; ++ connector->display_info.composite_sync = edid->composite_sync; ++ connector->display_info.separate_syncs = edid->separate_syncs; ++ connector->display_info.blank_to_black = edid->blank_to_black; ++ connector->display_info.video_level = edid->video_level; ++ 
connector->display_info.digital = edid->digital; ++ connector->display_info.width_mm = edid->width_cm * 10; ++ connector->display_info.height_mm = edid->height_cm * 10; ++ connector->display_info.gamma = edid->gamma; ++ connector->display_info.gtf_supported = edid->default_gtf; ++ connector->display_info.standard_color = edid->standard_color; ++ connector->display_info.display_type = edid->display_type; ++ connector->display_info.active_off_supported = edid->pm_active_off; ++ connector->display_info.suspend_supported = edid->pm_suspend; ++ connector->display_info.standby_supported = edid->pm_standby; ++ connector->display_info.gamma = edid->gamma; ++ ++ return num_modes; ++} ++EXPORT_SYMBOL(drm_add_edid_modes); +diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c +index 78eeed5..3733e36 100644 +--- a/drivers/gpu/drm/drm_fops.c ++++ b/drivers/gpu/drm/drm_fops.c +@@ -35,7 +35,6 @@ + */ + + #include "drmP.h" +-#include "drm_sarea.h" + #include + #include + +@@ -44,10 +43,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp, + + static int drm_setup(struct drm_device * dev) + { +- drm_local_map_t *map; + int i; + int ret; +- u32 sareapage; + + if (dev->driver->firstopen) { + ret = dev->driver->firstopen(dev); +@@ -55,20 +52,14 @@ static int drm_setup(struct drm_device * dev) + return ret; + } + +- dev->magicfree.next = NULL; +- +- /* prebuild the SAREA */ +- sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE); +- i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); +- if (i != 0) +- return i; +- + atomic_set(&dev->ioctl_count, 0); + atomic_set(&dev->vma_count, 0); +- dev->buf_use = 0; +- atomic_set(&dev->buf_alloc, 0); + +- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) { ++ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && ++ !drm_core_check_feature(dev, DRIVER_MODESET)) { ++ dev->buf_use = 0; ++ atomic_set(&dev->buf_alloc, 0); ++ + i = drm_dma_setup(dev); + if (i < 0) + return i; +@@ -77,16 +68,12 @@ static int 
drm_setup(struct drm_device * dev) + for (i = 0; i < ARRAY_SIZE(dev->counts); i++) + atomic_set(&dev->counts[i], 0); + +- drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER); +- INIT_LIST_HEAD(&dev->magicfree); +- + dev->sigdata.lock = NULL; +- init_waitqueue_head(&dev->lock.lock_queue); ++ + dev->queue_count = 0; + dev->queue_reserved = 0; + dev->queue_slots = 0; + dev->queuelist = NULL; +- dev->irq_enabled = 0; + dev->context_flag = 0; + dev->interrupt_flag = 0; + dev->dma_flag = 0; +@@ -147,10 +134,20 @@ int drm_open(struct inode *inode, struct file *filp) + spin_lock(&dev->count_lock); + if (!dev->open_count++) { + spin_unlock(&dev->count_lock); +- return drm_setup(dev); ++ retcode = drm_setup(dev); ++ goto out; + } + spin_unlock(&dev->count_lock); + } ++out: ++ mutex_lock(&dev->struct_mutex); ++ if (minor->type == DRM_MINOR_LEGACY) { ++ BUG_ON((dev->dev_mapping != NULL) && ++ (dev->dev_mapping != inode->i_mapping)); ++ if (dev->dev_mapping == NULL) ++ dev->dev_mapping = inode->i_mapping; ++ } ++ mutex_unlock(&dev->struct_mutex); + + return retcode; + } +@@ -255,6 +252,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, + priv->lock_count = 0; + + INIT_LIST_HEAD(&priv->lhead); ++ INIT_LIST_HEAD(&priv->fbs); + + if (dev->driver->driver_features & DRIVER_GEM) + drm_gem_open(dev, priv); +@@ -265,10 +263,42 @@ static int drm_open_helper(struct inode *inode, struct file *filp, + goto out_free; + } + ++ ++ /* if there is no current master make this fd it */ + mutex_lock(&dev->struct_mutex); +- if (list_empty(&dev->filelist)) +- priv->master = 1; ++ if (!priv->minor->master) { ++ /* create a new master */ ++ priv->minor->master = drm_master_create(priv->minor); ++ if (!priv->minor->master) { ++ ret = -ENOMEM; ++ goto out_free; ++ } + ++ priv->is_master = 1; ++ /* take another reference for the copy in the local file priv */ ++ priv->master = drm_master_get(priv->minor->master); ++ ++ priv->authenticated = 1; ++ ++ mutex_unlock(&dev->struct_mutex); 
++ if (dev->driver->master_create) { ++ ret = dev->driver->master_create(dev, priv->master); ++ if (ret) { ++ mutex_lock(&dev->struct_mutex); ++ /* drop both references if this fails */ ++ drm_master_put(&priv->minor->master); ++ drm_master_put(&priv->master); ++ mutex_unlock(&dev->struct_mutex); ++ goto out_free; ++ } ++ } ++ } else { ++ /* get a reference to the master */ ++ priv->master = drm_master_get(priv->minor->master); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ mutex_lock(&dev->struct_mutex); + list_add(&priv->lhead, &dev->filelist); + mutex_unlock(&dev->struct_mutex); + +@@ -314,6 +344,74 @@ int drm_fasync(int fd, struct file *filp, int on) + } + EXPORT_SYMBOL(drm_fasync); + ++/* ++ * Reclaim locked buffers; note that this may be a bad idea if the current ++ * context doesn't have the hw lock... ++ */ ++static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f) ++{ ++ struct drm_file *file_priv = f->private_data; ++ ++ if (drm_i_have_hw_lock(dev, file_priv)) { ++ dev->driver->reclaim_buffers_locked(dev, file_priv); ++ } else { ++ unsigned long _end = jiffies + 3 * DRM_HZ; ++ int locked = 0; ++ ++ drm_idlelock_take(&file_priv->master->lock); ++ ++ /* ++ * Wait for a while. ++ */ ++ do { ++ spin_lock_bh(&file_priv->master->lock.spinlock); ++ locked = file_priv->master->lock.idle_has_lock; ++ spin_unlock_bh(&file_priv->master->lock.spinlock); ++ if (locked) ++ break; ++ schedule(); ++ } while (!time_after_eq(jiffies, _end)); ++ ++ if (!locked) { ++ DRM_ERROR("reclaim_buffers_locked() deadlock. 
Please rework this\n" ++ "\tdriver to use reclaim_buffers_idlelocked() instead.\n" ++ "\tI will go on reclaiming the buffers anyway.\n"); ++ } ++ ++ dev->driver->reclaim_buffers_locked(dev, file_priv); ++ drm_idlelock_release(&file_priv->master->lock); ++ } ++} ++ ++static void drm_master_release(struct drm_device *dev, struct file *filp) ++{ ++ struct drm_file *file_priv = filp->private_data; ++ ++ if (dev->driver->reclaim_buffers_locked && ++ file_priv->master->lock.hw_lock) ++ drm_reclaim_locked_buffers(dev, filp); ++ ++ if (dev->driver->reclaim_buffers_idlelocked && ++ file_priv->master->lock.hw_lock) { ++ drm_idlelock_take(&file_priv->master->lock); ++ dev->driver->reclaim_buffers_idlelocked(dev, file_priv); ++ drm_idlelock_release(&file_priv->master->lock); ++ } ++ ++ ++ if (drm_i_have_hw_lock(dev, file_priv)) { ++ DRM_DEBUG("File %p released, freeing lock for context %d\n", ++ filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); ++ drm_lock_free(&file_priv->master->lock, ++ _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); ++ } ++ ++ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && ++ !dev->driver->reclaim_buffers_locked) { ++ dev->driver->reclaim_buffers(dev, file_priv); ++ } ++} ++ + /** + * Release file. + * +@@ -348,60 +446,9 @@ int drm_release(struct inode *inode, struct file *filp) + (long)old_encode_dev(file_priv->minor->device), + dev->open_count); + +- if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { +- if (drm_i_have_hw_lock(dev, file_priv)) { +- dev->driver->reclaim_buffers_locked(dev, file_priv); +- } else { +- unsigned long endtime = jiffies + 3 * DRM_HZ; +- int locked = 0; +- +- drm_idlelock_take(&dev->lock); +- +- /* +- * Wait for a while. 
+- */ +- +- do{ +- spin_lock_bh(&dev->lock.spinlock); +- locked = dev->lock.idle_has_lock; +- spin_unlock_bh(&dev->lock.spinlock); +- if (locked) +- break; +- schedule(); +- } while (!time_after_eq(jiffies, endtime)); +- +- if (!locked) { +- DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" +- "\tdriver to use reclaim_buffers_idlelocked() instead.\n" +- "\tI will go on reclaiming the buffers anyway.\n"); +- } +- +- dev->driver->reclaim_buffers_locked(dev, file_priv); +- drm_idlelock_release(&dev->lock); +- } +- } +- +- if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { +- +- drm_idlelock_take(&dev->lock); +- dev->driver->reclaim_buffers_idlelocked(dev, file_priv); +- drm_idlelock_release(&dev->lock); +- +- } +- +- if (drm_i_have_hw_lock(dev, file_priv)) { +- DRM_DEBUG("File %p released, freeing lock for context %d\n", +- filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); +- +- drm_lock_free(&dev->lock, +- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); +- } +- +- +- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && +- !dev->driver->reclaim_buffers_locked) { +- dev->driver->reclaim_buffers(dev, file_priv); +- } ++ /* if the master has gone away we can't do anything with the lock */ ++ if (file_priv->minor->master) ++ drm_master_release(dev, filp); + + if (dev->driver->driver_features & DRIVER_GEM) + drm_gem_release(dev, file_priv); +@@ -428,12 +475,24 @@ int drm_release(struct inode *inode, struct file *filp) + mutex_unlock(&dev->ctxlist_mutex); + + mutex_lock(&dev->struct_mutex); +- if (file_priv->remove_auth_on_close == 1) { ++ ++ if (file_priv->is_master) { + struct drm_file *temp; ++ list_for_each_entry(temp, &dev->filelist, lhead) { ++ if ((temp->master == file_priv->master) && ++ (temp != file_priv)) ++ temp->authenticated = 0; ++ } + +- list_for_each_entry(temp, &dev->filelist, lhead) +- temp->authenticated = 0; ++ if (file_priv->minor->master == file_priv->master) { ++ /* drop the reference held my the minor */ ++ 
drm_master_put(&file_priv->minor->master); ++ } + } ++ ++ /* drop the reference held my the file priv */ ++ drm_master_put(&file_priv->master); ++ file_priv->is_master = 0; + list_del(&file_priv->lhead); + mutex_unlock(&dev->struct_mutex); + +@@ -448,9 +507,9 @@ int drm_release(struct inode *inode, struct file *filp) + atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { +- if (atomic_read(&dev->ioctl_count) || dev->blocked) { +- DRM_ERROR("Device busy: %d %d\n", +- atomic_read(&dev->ioctl_count), dev->blocked); ++ if (atomic_read(&dev->ioctl_count)) { ++ DRM_ERROR("Device busy: %d\n", ++ atomic_read(&dev->ioctl_count)); + spin_unlock(&dev->count_lock); + unlock_kernel(); + return -EBUSY; +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index ccd1afd..9da5814 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -64,6 +64,13 @@ + * up at a later date, and as our interface with shmfs for memory allocation. + */ + ++/* ++ * We make up offsets for buffer objects so we can recognize them at ++ * mmap time. 
++ */ ++#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) ++#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) ++ + /** + * Initialize the GEM device fields + */ +@@ -71,6 +78,8 @@ + int + drm_gem_init(struct drm_device *dev) + { ++ struct drm_gem_mm *mm; ++ + spin_lock_init(&dev->object_name_lock); + idr_init(&dev->object_name_idr); + atomic_set(&dev->object_count, 0); +@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev) + atomic_set(&dev->pin_memory, 0); + atomic_set(&dev->gtt_count, 0); + atomic_set(&dev->gtt_memory, 0); ++ ++ mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM); ++ if (!mm) { ++ DRM_ERROR("out of memory\n"); ++ return -ENOMEM; ++ } ++ ++ dev->mm_private = mm; ++ ++ if (drm_ht_create(&mm->offset_hash, 19)) { ++ drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); ++ return -ENOMEM; ++ } ++ ++ if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, ++ DRM_FILE_PAGE_OFFSET_SIZE)) { ++ drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); ++ drm_ht_remove(&mm->offset_hash); ++ return -ENOMEM; ++ } ++ + return 0; + } + ++void ++drm_gem_destroy(struct drm_device *dev) ++{ ++ struct drm_gem_mm *mm = dev->mm_private; ++ ++ drm_mm_takedown(&mm->offset_manager); ++ drm_ht_remove(&mm->offset_hash); ++ drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); ++ dev->mm_private = NULL; ++} ++ + /** + * Allocate a GEM object of the specified size with shmfs backing store + */ +@@ -419,3 +460,73 @@ drm_gem_object_handle_free(struct kref *kref) + } + EXPORT_SYMBOL(drm_gem_object_handle_free); + ++/** ++ * drm_gem_mmap - memory map routine for GEM objects ++ * @filp: DRM file pointer ++ * @vma: VMA for the area to be mapped ++ * ++ * If a driver supports GEM object mapping, mmap calls on the DRM file ++ * descriptor will end up here. 
++ * ++ * If we find the object based on the offset passed in (vma->vm_pgoff will ++ * contain the fake offset we created when the GTT map ioctl was called on ++ * the object), we set up the driver fault handler so that any accesses ++ * to the object can be trapped, to perform migration, GTT binding, surface ++ * register allocation, or performance monitoring. ++ */ ++int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_gem_mm *mm = dev->mm_private; ++ struct drm_map *map = NULL; ++ struct drm_gem_object *obj; ++ struct drm_hash_item *hash; ++ unsigned long prot; ++ int ret = 0; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { ++ mutex_unlock(&dev->struct_mutex); ++ return drm_mmap(filp, vma); ++ } ++ ++ map = drm_hash_entry(hash, struct drm_map_list, hash)->map; ++ if (!map || ++ ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) { ++ ret = -EPERM; ++ goto out_unlock; ++ } ++ ++ /* Check for valid size. 
*/ ++ if (map->size < vma->vm_end - vma->vm_start) { ++ ret = -EINVAL; ++ goto out_unlock; ++ } ++ ++ obj = map->handle; ++ if (!obj->dev->driver->gem_vm_ops) { ++ ret = -EINVAL; ++ goto out_unlock; ++ } ++ ++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; ++ vma->vm_ops = obj->dev->driver->gem_vm_ops; ++ vma->vm_private_data = map->handle; ++ /* FIXME: use pgprot_writecombine when available */ ++ prot = pgprot_val(vma->vm_page_prot); ++#ifdef CONFIG_X86 ++ prot |= _PAGE_CACHE_WC; ++#endif ++ vma->vm_page_prot = __pgprot(prot); ++ ++ vma->vm_file = filp; /* Needed for drm_vm_open() */ ++ drm_vm_open_locked(vma); ++ ++out_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_gem_mmap); +diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c +index 3316067..af539f7 100644 +--- a/drivers/gpu/drm/drm_hashtab.c ++++ b/drivers/gpu/drm/drm_hashtab.c +@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) + } + return 0; + } ++EXPORT_SYMBOL(drm_ht_insert_item); + + /* + * Just insert an item and return any "bits" bit key that hasn't been +@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) + ht->fill--; + return 0; + } ++EXPORT_SYMBOL(drm_ht_remove_item); + + void drm_ht_remove(struct drm_open_hash *ht) + { +diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c +index 16829fb..1fad762 100644 +--- a/drivers/gpu/drm/drm_ioctl.c ++++ b/drivers/gpu/drm/drm_ioctl.c +@@ -53,12 +53,13 @@ int drm_getunique(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { + struct drm_unique *u = data; ++ struct drm_master *master = file_priv->master; + +- if (u->unique_len >= dev->unique_len) { +- if (copy_to_user(u->unique, dev->unique, dev->unique_len)) ++ if (u->unique_len >= master->unique_len) { ++ if (copy_to_user(u->unique, master->unique, master->unique_len)) + return -EFAULT; + } +- 
u->unique_len = dev->unique_len; ++ u->unique_len = master->unique_len; + + return 0; + } +@@ -81,36 +82,38 @@ int drm_setunique(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { + struct drm_unique *u = data; ++ struct drm_master *master = file_priv->master; + int domain, bus, slot, func, ret; + +- if (dev->unique_len || dev->unique) ++ if (master->unique_len || master->unique) + return -EBUSY; + + if (!u->unique_len || u->unique_len > 1024) + return -EINVAL; + +- dev->unique_len = u->unique_len; +- dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); +- if (!dev->unique) ++ master->unique_len = u->unique_len; ++ master->unique_size = u->unique_len + 1; ++ master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); ++ if (!master->unique) + return -ENOMEM; +- if (copy_from_user(dev->unique, u->unique, dev->unique_len)) ++ if (copy_from_user(master->unique, u->unique, master->unique_len)) + return -EFAULT; + +- dev->unique[dev->unique_len] = '\0'; ++ master->unique[master->unique_len] = '\0'; + + dev->devname = + drm_alloc(strlen(dev->driver->pci_driver.name) + +- strlen(dev->unique) + 2, DRM_MEM_DRIVER); ++ strlen(master->unique) + 2, DRM_MEM_DRIVER); + if (!dev->devname) + return -ENOMEM; + + sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, +- dev->unique); ++ master->unique); + + /* Return error if the busid submitted doesn't match the device's actual + * busid. 
+ */ +- ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); ++ ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func); + if (ret != 3) + return -EINVAL; + domain = bus >> 8; +@@ -125,34 +128,38 @@ int drm_setunique(struct drm_device *dev, void *data, + return 0; + } + +-static int drm_set_busid(struct drm_device * dev) ++static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) + { ++ struct drm_master *master = file_priv->master; + int len; + +- if (dev->unique != NULL) +- return 0; ++ if (master->unique != NULL) ++ return -EBUSY; + +- dev->unique_len = 40; +- dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); +- if (dev->unique == NULL) ++ master->unique_len = 40; ++ master->unique_size = master->unique_len; ++ master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); ++ if (master->unique == NULL) + return -ENOMEM; + +- len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", +- drm_get_pci_domain(dev), dev->pdev->bus->number, ++ len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d", ++ drm_get_pci_domain(dev), ++ dev->pdev->bus->number, + PCI_SLOT(dev->pdev->devfn), + PCI_FUNC(dev->pdev->devfn)); +- +- if (len > dev->unique_len) +- DRM_ERROR("Unique buffer overflowed\n"); ++ if (len >= master->unique_len) ++ DRM_ERROR("buffer overflow"); ++ else ++ master->unique_len = len; + + dev->devname = +- drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + ++ drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len + + 2, DRM_MEM_DRIVER); + if (dev->devname == NULL) + return -ENOMEM; + + sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, +- dev->unique); ++ master->unique); + + return 0; + } +@@ -276,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data, + for (i = 0; i < dev->counters; i++) { + if (dev->types[i] == _DRM_STAT_LOCK) + stats->data[i].value = +- (dev->lock.hw_lock ? 
dev->lock.hw_lock->lock : 0); ++ (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0); + else + stats->data[i].value = atomic_read(&dev->counts[i]); + stats->data[i].type = dev->types[i]; +@@ -318,7 +325,7 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri + /* + * Version 1.1 includes tying of DRM to specific device + */ +- drm_set_busid(dev); ++ drm_set_busid(dev, file_priv); + } + } + +diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c +index 1e787f8..1608f8d 100644 +--- a/drivers/gpu/drm/drm_irq.c ++++ b/drivers/gpu/drm/drm_irq.c +@@ -305,6 +305,8 @@ int drm_control(struct drm_device *dev, void *data, + case DRM_INST_HANDLER: + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) + return 0; ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; + if (dev->if_version < DRM_IF_VERSION(1, 2) && + ctl->irq != dev->pdev->irq) + return -EINVAL; +@@ -312,6 +314,8 @@ int drm_control(struct drm_device *dev, void *data, + case DRM_UNINST_HANDLER: + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) + return 0; ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; + return drm_irq_uninstall(dev); + default: + return -EINVAL; +@@ -427,6 +431,45 @@ void drm_vblank_put(struct drm_device *dev, int crtc) + EXPORT_SYMBOL(drm_vblank_put); + + /** ++ * drm_vblank_pre_modeset - account for vblanks across mode sets ++ * @dev: DRM device ++ * @crtc: CRTC in question ++ * @post: post or pre mode set? ++ * ++ * Account for vblank events across mode setting events, which will likely ++ * reset the hardware frame counter. 
++ */ ++void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) ++{ ++ /* ++ * To avoid all the problems that might happen if interrupts ++ * were enabled/disabled around or between these calls, we just ++ * have the kernel take a reference on the CRTC (just once though ++ * to avoid corrupting the count if multiple, mismatch calls occur), ++ * so that interrupts remain enabled in the interim. ++ */ ++ if (!dev->vblank_inmodeset[crtc]) { ++ dev->vblank_inmodeset[crtc] = 1; ++ drm_vblank_get(dev, crtc); ++ } ++} ++EXPORT_SYMBOL(drm_vblank_pre_modeset); ++ ++void drm_vblank_post_modeset(struct drm_device *dev, int crtc) ++{ ++ unsigned long irqflags; ++ ++ if (dev->vblank_inmodeset[crtc]) { ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ dev->vblank_disable_allowed = 1; ++ dev->vblank_inmodeset[crtc] = 0; ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ drm_vblank_put(dev, crtc); ++ } ++} ++EXPORT_SYMBOL(drm_vblank_post_modeset); ++ ++/** + * drm_modeset_ctl - handle vblank event counter changes across mode switch + * @DRM_IOCTL_ARGS: standard ioctl arguments + * +@@ -441,7 +484,6 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { + struct drm_modeset_ctl *modeset = data; +- unsigned long irqflags; + int crtc, ret = 0; + + /* If drm_vblank_init() hasn't been called yet, just no-op */ +@@ -454,28 +496,12 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, + goto out; + } + +- /* +- * To avoid all the problems that might happen if interrupts +- * were enabled/disabled around or between these calls, we just +- * have the kernel take a reference on the CRTC (just once though +- * to avoid corrupting the count if multiple, mismatch calls occur), +- * so that interrupts remain enabled in the interim. 
+- */ + switch (modeset->cmd) { + case _DRM_PRE_MODESET: +- if (!dev->vblank_inmodeset[crtc]) { +- dev->vblank_inmodeset[crtc] = 1; +- drm_vblank_get(dev, crtc); +- } ++ drm_vblank_pre_modeset(dev, crtc); + break; + case _DRM_POST_MODESET: +- if (dev->vblank_inmodeset[crtc]) { +- spin_lock_irqsave(&dev->vbl_lock, irqflags); +- dev->vblank_disable_allowed = 1; +- dev->vblank_inmodeset[crtc] = 0; +- spin_unlock_irqrestore(&dev->vbl_lock, irqflags); +- drm_vblank_put(dev, crtc); +- } ++ drm_vblank_post_modeset(dev, crtc); + break; + default: + ret = -EINVAL; +diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c +index 1cfa720..46e7b28 100644 +--- a/drivers/gpu/drm/drm_lock.c ++++ b/drivers/gpu/drm/drm_lock.c +@@ -52,6 +52,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + DECLARE_WAITQUEUE(entry, current); + struct drm_lock *lock = data; ++ struct drm_master *master = file_priv->master; + int ret = 0; + + ++file_priv->lock_count; +@@ -64,26 +65,27 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) + + DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", + lock->context, task_pid_nr(current), +- dev->lock.hw_lock->lock, lock->flags); ++ master->lock.hw_lock->lock, lock->flags); + + if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) + if (lock->context < 0) + return -EINVAL; + +- add_wait_queue(&dev->lock.lock_queue, &entry); +- spin_lock_bh(&dev->lock.spinlock); +- dev->lock.user_waiters++; +- spin_unlock_bh(&dev->lock.spinlock); ++ add_wait_queue(&master->lock.lock_queue, &entry); ++ spin_lock_bh(&master->lock.spinlock); ++ master->lock.user_waiters++; ++ spin_unlock_bh(&master->lock.spinlock); ++ + for (;;) { + __set_current_state(TASK_INTERRUPTIBLE); +- if (!dev->lock.hw_lock) { ++ if (!master->lock.hw_lock) { + /* Device has been unregistered */ + ret = -EINTR; + break; + } +- if (drm_lock_take(&dev->lock, lock->context)) { +- dev->lock.file_priv = file_priv; +- 
dev->lock.lock_time = jiffies; ++ if (drm_lock_take(&master->lock, lock->context)) { ++ master->lock.file_priv = file_priv; ++ master->lock.lock_time = jiffies; + atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); + break; /* Got lock */ + } +@@ -95,11 +97,11 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) + break; + } + } +- spin_lock_bh(&dev->lock.spinlock); +- dev->lock.user_waiters--; +- spin_unlock_bh(&dev->lock.spinlock); ++ spin_lock_bh(&master->lock.spinlock); ++ master->lock.user_waiters--; ++ spin_unlock_bh(&master->lock.spinlock); + __set_current_state(TASK_RUNNING); +- remove_wait_queue(&dev->lock.lock_queue, &entry); ++ remove_wait_queue(&master->lock.lock_queue, &entry); + + DRM_DEBUG("%d %s\n", lock->context, + ret ? "interrupted" : "has lock"); +@@ -108,14 +110,14 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) + /* don't set the block all signals on the master process for now + * really probably not the correct answer but lets us debug xkb + * xserver for now */ +- if (!file_priv->master) { ++ if (!file_priv->is_master) { + sigemptyset(&dev->sigmask); + sigaddset(&dev->sigmask, SIGSTOP); + sigaddset(&dev->sigmask, SIGTSTP); + sigaddset(&dev->sigmask, SIGTTIN); + sigaddset(&dev->sigmask, SIGTTOU); + dev->sigdata.context = lock->context; +- dev->sigdata.lock = dev->lock.hw_lock; ++ dev->sigdata.lock = master->lock.hw_lock; + block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); + } + +@@ -154,6 +156,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) + int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + struct drm_lock *lock = data; ++ struct drm_master *master = file_priv->master; + + if (lock->context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", +@@ -169,7 +172,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) + if (dev->driver->kernel_context_switch_unlock) 
+ dev->driver->kernel_context_switch_unlock(dev); + else { +- if (drm_lock_free(&dev->lock,lock->context)) { ++ if (drm_lock_free(&master->lock, lock->context)) { + /* FIXME: Should really bail out here. */ + } + } +@@ -379,9 +382,10 @@ EXPORT_SYMBOL(drm_idlelock_release); + + int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) + { +- return (file_priv->lock_count && dev->lock.hw_lock && +- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && +- dev->lock.file_priv == file_priv); ++ struct drm_master *master = file_priv->master; ++ return (file_priv->lock_count && master->lock.hw_lock && ++ _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && ++ master->lock.file_priv == file_priv); + } + + EXPORT_SYMBOL(drm_i_have_hw_lock); +diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c +index 217ad7d..367c590 100644 +--- a/drivers/gpu/drm/drm_mm.c ++++ b/drivers/gpu/drm/drm_mm.c +@@ -296,3 +296,4 @@ void drm_mm_takedown(struct drm_mm * mm) + + drm_free(entry, sizeof(*entry), DRM_MEM_MM); + } ++EXPORT_SYMBOL(drm_mm_takedown); +diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c +new file mode 100644 +index 0000000..c9b80fd +--- /dev/null ++++ b/drivers/gpu/drm/drm_modes.c +@@ -0,0 +1,576 @@ ++/* ++ * The list_sort function is (presumably) licensed under the GPL (see the ++ * top level "COPYING" file for details). ++ * ++ * The remainder of this file is: ++ * ++ * Copyright © 1997-2003 by The XFree86 Project, Inc. 
++ * Copyright © 2007 Dave Airlie ++ * Copyright © 2007-2008 Intel Corporation ++ * Jesse Barnes ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Except as contained in this notice, the name of the copyright holder(s) ++ * and author(s) shall not be used in advertising or otherwise to promote ++ * the sale, use or other dealings in this Software without prior written ++ * authorization from the copyright holder(s) and author(s). ++ */ ++ ++#include ++#include "drmP.h" ++#include "drm.h" ++#include "drm_crtc.h" ++ ++/** ++ * drm_mode_debug_printmodeline - debug print a mode ++ * @dev: DRM device ++ * @mode: mode to print ++ * ++ * LOCKING: ++ * None. ++ * ++ * Describe @mode using DRM_DEBUG. 
++ */ ++void drm_mode_debug_printmodeline(struct drm_display_mode *mode) ++{ ++ DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", ++ mode->base.id, mode->name, mode->vrefresh, mode->clock, ++ mode->hdisplay, mode->hsync_start, ++ mode->hsync_end, mode->htotal, ++ mode->vdisplay, mode->vsync_start, ++ mode->vsync_end, mode->vtotal, mode->type, mode->flags); ++} ++EXPORT_SYMBOL(drm_mode_debug_printmodeline); ++ ++/** ++ * drm_mode_set_name - set the name on a mode ++ * @mode: name will be set in this mode ++ * ++ * LOCKING: ++ * None. ++ * ++ * Set the name of @mode to a standard format. ++ */ ++void drm_mode_set_name(struct drm_display_mode *mode) ++{ ++ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay, ++ mode->vdisplay); ++} ++EXPORT_SYMBOL(drm_mode_set_name); ++ ++/** ++ * drm_mode_list_concat - move modes from one list to another ++ * @head: source list ++ * @new: dst list ++ * ++ * LOCKING: ++ * Caller must ensure both lists are locked. ++ * ++ * Move all the modes from @head to @new. ++ */ ++void drm_mode_list_concat(struct list_head *head, struct list_head *new) ++{ ++ ++ struct list_head *entry, *tmp; ++ ++ list_for_each_safe(entry, tmp, head) { ++ list_move_tail(entry, new); ++ } ++} ++EXPORT_SYMBOL(drm_mode_list_concat); ++ ++/** ++ * drm_mode_width - get the width of a mode ++ * @mode: mode ++ * ++ * LOCKING: ++ * None. ++ * ++ * Return @mode's width (hdisplay) value. ++ * ++ * FIXME: is this needed? ++ * ++ * RETURNS: ++ * @mode->hdisplay ++ */ ++int drm_mode_width(struct drm_display_mode *mode) ++{ ++ return mode->hdisplay; ++ ++} ++EXPORT_SYMBOL(drm_mode_width); ++ ++/** ++ * drm_mode_height - get the height of a mode ++ * @mode: mode ++ * ++ * LOCKING: ++ * None. ++ * ++ * Return @mode's height (vdisplay) value. ++ * ++ * FIXME: is this needed? 
++ * ++ * RETURNS: ++ * @mode->vdisplay ++ */ ++int drm_mode_height(struct drm_display_mode *mode) ++{ ++ return mode->vdisplay; ++} ++EXPORT_SYMBOL(drm_mode_height); ++ ++/** ++ * drm_mode_vrefresh - get the vrefresh of a mode ++ * @mode: mode ++ * ++ * LOCKING: ++ * None. ++ * ++ * Return @mode's vrefresh rate or calculate it if necessary. ++ * ++ * FIXME: why is this needed? shouldn't vrefresh be set already? ++ * ++ * RETURNS: ++ * Vertical refresh rate of @mode x 1000. For precision reasons. ++ */ ++int drm_mode_vrefresh(struct drm_display_mode *mode) ++{ ++ int refresh = 0; ++ unsigned int calc_val; ++ ++ if (mode->vrefresh > 0) ++ refresh = mode->vrefresh; ++ else if (mode->htotal > 0 && mode->vtotal > 0) { ++ /* work out vrefresh the value will be x1000 */ ++ calc_val = (mode->clock * 1000); ++ ++ calc_val /= mode->htotal; ++ calc_val *= 1000; ++ calc_val /= mode->vtotal; ++ ++ refresh = calc_val; ++ if (mode->flags & DRM_MODE_FLAG_INTERLACE) ++ refresh *= 2; ++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ refresh /= 2; ++ if (mode->vscan > 1) ++ refresh /= mode->vscan; ++ } ++ return refresh; ++} ++EXPORT_SYMBOL(drm_mode_vrefresh); ++ ++/** ++ * drm_mode_set_crtcinfo - set CRTC modesetting parameters ++ * @p: mode ++ * @adjust_flags: unused? (FIXME) ++ * ++ * LOCKING: ++ * None. ++ * ++ * Setup the CRTC modesetting parameters for @p, adjusting if necessary. 
++ */ ++void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) ++{ ++ if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN)) ++ return; ++ ++ p->crtc_hdisplay = p->hdisplay; ++ p->crtc_hsync_start = p->hsync_start; ++ p->crtc_hsync_end = p->hsync_end; ++ p->crtc_htotal = p->htotal; ++ p->crtc_hskew = p->hskew; ++ p->crtc_vdisplay = p->vdisplay; ++ p->crtc_vsync_start = p->vsync_start; ++ p->crtc_vsync_end = p->vsync_end; ++ p->crtc_vtotal = p->vtotal; ++ ++ if (p->flags & DRM_MODE_FLAG_INTERLACE) { ++ if (adjust_flags & CRTC_INTERLACE_HALVE_V) { ++ p->crtc_vdisplay /= 2; ++ p->crtc_vsync_start /= 2; ++ p->crtc_vsync_end /= 2; ++ p->crtc_vtotal /= 2; ++ } ++ ++ p->crtc_vtotal |= 1; ++ } ++ ++ if (p->flags & DRM_MODE_FLAG_DBLSCAN) { ++ p->crtc_vdisplay *= 2; ++ p->crtc_vsync_start *= 2; ++ p->crtc_vsync_end *= 2; ++ p->crtc_vtotal *= 2; ++ } ++ ++ if (p->vscan > 1) { ++ p->crtc_vdisplay *= p->vscan; ++ p->crtc_vsync_start *= p->vscan; ++ p->crtc_vsync_end *= p->vscan; ++ p->crtc_vtotal *= p->vscan; ++ } ++ ++ p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay); ++ p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); ++ p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); ++ p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal); ++ ++ p->crtc_hadjusted = false; ++ p->crtc_vadjusted = false; ++} ++EXPORT_SYMBOL(drm_mode_set_crtcinfo); ++ ++ ++/** ++ * drm_mode_duplicate - allocate and duplicate an existing mode ++ * @m: mode to duplicate ++ * ++ * LOCKING: ++ * None. ++ * ++ * Just allocate a new mode, copy the existing mode into it, and return ++ * a pointer to it. Used to create new instances of established modes. 
++ */ ++struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, ++ struct drm_display_mode *mode) ++{ ++ struct drm_display_mode *nmode; ++ int new_id; ++ ++ nmode = drm_mode_create(dev); ++ if (!nmode) ++ return NULL; ++ ++ new_id = nmode->base.id; ++ *nmode = *mode; ++ nmode->base.id = new_id; ++ INIT_LIST_HEAD(&nmode->head); ++ return nmode; ++} ++EXPORT_SYMBOL(drm_mode_duplicate); ++ ++/** ++ * drm_mode_equal - test modes for equality ++ * @mode1: first mode ++ * @mode2: second mode ++ * ++ * LOCKING: ++ * None. ++ * ++ * Check to see if @mode1 and @mode2 are equivalent. ++ * ++ * RETURNS: ++ * True if the modes are equal, false otherwise. ++ */ ++bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2) ++{ ++ /* do clock check convert to PICOS so fb modes get matched ++ * the same */ ++ if (mode1->clock && mode2->clock) { ++ if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock)) ++ return false; ++ } else if (mode1->clock != mode2->clock) ++ return false; ++ ++ if (mode1->hdisplay == mode2->hdisplay && ++ mode1->hsync_start == mode2->hsync_start && ++ mode1->hsync_end == mode2->hsync_end && ++ mode1->htotal == mode2->htotal && ++ mode1->hskew == mode2->hskew && ++ mode1->vdisplay == mode2->vdisplay && ++ mode1->vsync_start == mode2->vsync_start && ++ mode1->vsync_end == mode2->vsync_end && ++ mode1->vtotal == mode2->vtotal && ++ mode1->vscan == mode2->vscan && ++ mode1->flags == mode2->flags) ++ return true; ++ ++ return false; ++} ++EXPORT_SYMBOL(drm_mode_equal); ++ ++/** ++ * drm_mode_validate_size - make sure modes adhere to size constraints ++ * @dev: DRM device ++ * @mode_list: list of modes to check ++ * @maxX: maximum width ++ * @maxY: maximum height ++ * @maxPitch: max pitch ++ * ++ * LOCKING: ++ * Caller must hold a lock protecting @mode_list. ++ * ++ * The DRM device (@dev) has size and pitch limits. Here we validate the ++ * modes we probed for @dev against those limits and set their status as ++ * necessary. 
++ */ ++void drm_mode_validate_size(struct drm_device *dev, ++ struct list_head *mode_list, ++ int maxX, int maxY, int maxPitch) ++{ ++ struct drm_display_mode *mode; ++ ++ list_for_each_entry(mode, mode_list, head) { ++ if (maxPitch > 0 && mode->hdisplay > maxPitch) ++ mode->status = MODE_BAD_WIDTH; ++ ++ if (maxX > 0 && mode->hdisplay > maxX) ++ mode->status = MODE_VIRTUAL_X; ++ ++ if (maxY > 0 && mode->vdisplay > maxY) ++ mode->status = MODE_VIRTUAL_Y; ++ } ++} ++EXPORT_SYMBOL(drm_mode_validate_size); ++ ++/** ++ * drm_mode_validate_clocks - validate modes against clock limits ++ * @dev: DRM device ++ * @mode_list: list of modes to check ++ * @min: minimum clock rate array ++ * @max: maximum clock rate array ++ * @n_ranges: number of clock ranges (size of arrays) ++ * ++ * LOCKING: ++ * Caller must hold a lock protecting @mode_list. ++ * ++ * Some code may need to check a mode list against the clock limits of the ++ * device in question. This function walks the mode list, testing to make ++ * sure each mode falls within a given range (defined by @min and @max ++ * arrays) and sets @mode->status as needed. ++ */ ++void drm_mode_validate_clocks(struct drm_device *dev, ++ struct list_head *mode_list, ++ int *min, int *max, int n_ranges) ++{ ++ struct drm_display_mode *mode; ++ int i; ++ ++ list_for_each_entry(mode, mode_list, head) { ++ bool good = false; ++ for (i = 0; i < n_ranges; i++) { ++ if (mode->clock >= min[i] && mode->clock <= max[i]) { ++ good = true; ++ break; ++ } ++ } ++ if (!good) ++ mode->status = MODE_CLOCK_RANGE; ++ } ++} ++EXPORT_SYMBOL(drm_mode_validate_clocks); ++ ++/** ++ * drm_mode_prune_invalid - remove invalid modes from mode list ++ * @dev: DRM device ++ * @mode_list: list of modes to check ++ * @verbose: be verbose about it ++ * ++ * LOCKING: ++ * Caller must hold a lock protecting @mode_list. ++ * ++ * Once mode list generation is complete, a caller can use this routine to ++ * remove invalid modes from a mode list. 
If any of the modes have a ++ * status other than %MODE_OK, they are removed from @mode_list and freed. ++ */ ++void drm_mode_prune_invalid(struct drm_device *dev, ++ struct list_head *mode_list, bool verbose) ++{ ++ struct drm_display_mode *mode, *t; ++ ++ list_for_each_entry_safe(mode, t, mode_list, head) { ++ if (mode->status != MODE_OK) { ++ list_del(&mode->head); ++ if (verbose) { ++ drm_mode_debug_printmodeline(mode); ++ DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status); ++ } ++ drm_mode_destroy(dev, mode); ++ } ++ } ++} ++EXPORT_SYMBOL(drm_mode_prune_invalid); ++ ++/** ++ * drm_mode_compare - compare modes for favorability ++ * @lh_a: list_head for first mode ++ * @lh_b: list_head for second mode ++ * ++ * LOCKING: ++ * None. ++ * ++ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating ++ * which is better. ++ * ++ * RETURNS: ++ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or ++ * positive if @lh_b is better than @lh_a. ++ */ ++static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b) ++{ ++ struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head); ++ struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head); ++ int diff; ++ ++ diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) - ++ ((a->type & DRM_MODE_TYPE_PREFERRED) != 0); ++ if (diff) ++ return diff; ++ diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay; ++ if (diff) ++ return diff; ++ diff = b->clock - a->clock; ++ return diff; ++} ++ ++/* FIXME: what we don't have a list sort function? 
*/ ++/* list sort from Mark J Roberts (mjr@znex.org) */ ++void list_sort(struct list_head *head, ++ int (*cmp)(struct list_head *a, struct list_head *b)) ++{ ++ struct list_head *p, *q, *e, *list, *tail, *oldhead; ++ int insize, nmerges, psize, qsize, i; ++ ++ list = head->next; ++ list_del(head); ++ insize = 1; ++ for (;;) { ++ p = oldhead = list; ++ list = tail = NULL; ++ nmerges = 0; ++ ++ while (p) { ++ nmerges++; ++ q = p; ++ psize = 0; ++ for (i = 0; i < insize; i++) { ++ psize++; ++ q = q->next == oldhead ? NULL : q->next; ++ if (!q) ++ break; ++ } ++ ++ qsize = insize; ++ while (psize > 0 || (qsize > 0 && q)) { ++ if (!psize) { ++ e = q; ++ q = q->next; ++ qsize--; ++ if (q == oldhead) ++ q = NULL; ++ } else if (!qsize || !q) { ++ e = p; ++ p = p->next; ++ psize--; ++ if (p == oldhead) ++ p = NULL; ++ } else if (cmp(p, q) <= 0) { ++ e = p; ++ p = p->next; ++ psize--; ++ if (p == oldhead) ++ p = NULL; ++ } else { ++ e = q; ++ q = q->next; ++ qsize--; ++ if (q == oldhead) ++ q = NULL; ++ } ++ if (tail) ++ tail->next = e; ++ else ++ list = e; ++ e->prev = tail; ++ tail = e; ++ } ++ p = q; ++ } ++ ++ tail->next = list; ++ list->prev = tail; ++ ++ if (nmerges <= 1) ++ break; ++ ++ insize *= 2; ++ } ++ ++ head->next = list; ++ head->prev = list->prev; ++ list->prev->next = head; ++ list->prev = head; ++} ++ ++/** ++ * drm_mode_sort - sort mode list ++ * @mode_list: list to sort ++ * ++ * LOCKING: ++ * Caller must hold a lock protecting @mode_list. ++ * ++ * Sort @mode_list by favorability, putting good modes first. ++ */ ++void drm_mode_sort(struct list_head *mode_list) ++{ ++ list_sort(mode_list, drm_mode_compare); ++} ++EXPORT_SYMBOL(drm_mode_sort); ++ ++/** ++ * drm_mode_connector_list_update - update the mode list for the connector ++ * @connector: the connector to update ++ * ++ * LOCKING: ++ * Caller must hold a lock protecting @mode_list. ++ * ++ * This moves the modes from the @connector probed_modes list ++ * to the actual mode list. 
It compares the probed mode against the current ++ * list and only adds different modes. All modes unverified after this point ++ * will be removed by the prune invalid modes. ++ */ ++void drm_mode_connector_list_update(struct drm_connector *connector) ++{ ++ struct drm_display_mode *mode; ++ struct drm_display_mode *pmode, *pt; ++ int found_it; ++ ++ list_for_each_entry_safe(pmode, pt, &connector->probed_modes, ++ head) { ++ found_it = 0; ++ /* go through current modes checking for the new probed mode */ ++ list_for_each_entry(mode, &connector->modes, head) { ++ if (drm_mode_equal(pmode, mode)) { ++ found_it = 1; ++ /* if equal delete the probed mode */ ++ mode->status = pmode->status; ++ list_del(&pmode->head); ++ drm_mode_destroy(connector->dev, pmode); ++ break; ++ } ++ } ++ ++ if (!found_it) { ++ list_move_tail(&pmode->head, &connector->modes); ++ } ++ } ++} ++EXPORT_SYMBOL(drm_mode_connector_list_update); +diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c +index ae73b7f..7dbaa1a 100644 +--- a/drivers/gpu/drm/drm_proc.c ++++ b/drivers/gpu/drm/drm_proc.c +@@ -195,6 +195,7 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) + { + struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_master *master = minor->master; + struct drm_device *dev = minor->dev; + int len = 0; + +@@ -203,13 +204,16 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, + return 0; + } + ++ if (!master) ++ return 0; ++ + *start = &buf[offset]; + *eof = 0; + +- if (dev->unique) { ++ if (master->unique) { + DRM_PROC_PRINT("%s %s %s\n", + dev->driver->pci_driver.name, +- pci_name(dev->pdev), dev->unique); ++ pci_name(dev->pdev), master->unique); + } else { + DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, + pci_name(dev->pdev)); +diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c +index 66c96ec..5ca132a 100644 +--- a/drivers/gpu/drm/drm_stub.c ++++ 
b/drivers/gpu/drm/drm_stub.c +@@ -57,6 +57,14 @@ static int drm_minor_get_id(struct drm_device *dev, int type) + int ret; + int base = 0, limit = 63; + ++ if (type == DRM_MINOR_CONTROL) { ++ base += 64; ++ limit = base + 127; ++ } else if (type == DRM_MINOR_RENDER) { ++ base += 128; ++ limit = base + 255; ++ } ++ + again: + if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) { + DRM_ERROR("Out of memory expanding drawable idr\n"); +@@ -79,6 +87,104 @@ again: + return new_id; + } + ++struct drm_master *drm_master_create(struct drm_minor *minor) ++{ ++ struct drm_master *master; ++ ++ master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER); ++ if (!master) ++ return NULL; ++ ++ kref_init(&master->refcount); ++ spin_lock_init(&master->lock.spinlock); ++ init_waitqueue_head(&master->lock.lock_queue); ++ drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER); ++ INIT_LIST_HEAD(&master->magicfree); ++ master->minor = minor; ++ ++ list_add_tail(&master->head, &minor->master_list); ++ ++ return master; ++} ++ ++struct drm_master *drm_master_get(struct drm_master *master) ++{ ++ kref_get(&master->refcount); ++ return master; ++} ++ ++static void drm_master_destroy(struct kref *kref) ++{ ++ struct drm_master *master = container_of(kref, struct drm_master, refcount); ++ struct drm_magic_entry *pt, *next; ++ struct drm_device *dev = master->minor->dev; ++ ++ list_del(&master->head); ++ ++ if (dev->driver->master_destroy) ++ dev->driver->master_destroy(dev, master); ++ ++ if (master->unique) { ++ drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); ++ master->unique = NULL; ++ master->unique_len = 0; ++ } ++ ++ list_for_each_entry_safe(pt, next, &master->magicfree, head) { ++ list_del(&pt->head); ++ drm_ht_remove_item(&master->magiclist, &pt->hash_item); ++ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); ++ } ++ ++ drm_ht_remove(&master->magiclist); ++ ++ if (master->lock.hw_lock) { ++ if (dev->sigdata.lock == master->lock.hw_lock) ++ dev->sigdata.lock = NULL; ++ 
master->lock.hw_lock = NULL;
++ master->lock.file_priv = NULL;
++ wake_up_interruptible(&master->lock.lock_queue);
++ }
++
++ drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
++}
++
++void drm_master_put(struct drm_master **master)
++{
++ kref_put(&(*master)->refcount, drm_master_destroy);
++ *master = NULL;
++}
++
++int drm_setmaster_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
++ return -EINVAL;
++
++ if (!file_priv->master)
++ return -EINVAL;
++
++ if (!file_priv->minor->master &&
++ file_priv->minor->master != file_priv->master) {
++ mutex_lock(&dev->struct_mutex);
++ file_priv->minor->master = drm_master_get(file_priv->master);
++ mutex_unlock(&dev->struct_mutex);
++ }
++
++ return 0;
++}
++
++int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ if (!file_priv->master)
++ return -EINVAL;
++ mutex_lock(&dev->struct_mutex);
++ drm_master_put(&file_priv->minor->master);
++ mutex_unlock(&dev->struct_mutex);
++ return 0;
++}
++
+ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
+@@ -92,7 +198,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+
+ spin_lock_init(&dev->count_lock);
+ spin_lock_init(&dev->drw_lock);
+- spin_lock_init(&dev->lock.spinlock);
+ init_timer(&dev->timer);
+ mutex_init(&dev->struct_mutex);
+ mutex_init(&dev->ctxlist_mutex);
+@@ -140,9 +245,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+ }
+ }
+
+- if (dev->driver->load)
+- if ((retcode = dev->driver->load(dev, ent->driver_data)))
+- goto error_out_unreg;
+
+ retcode = drm_ctxbitmap_init(dev);
+ if (retcode) {
+@@ -200,6 +302,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
+ new_minor->device = MKDEV(DRM_MAJOR, minor_id);
+ new_minor->dev = dev;
+ 
new_minor->index = minor_id; ++ INIT_LIST_HEAD(&new_minor->master_list); + + idr_replace(&drm_minors_idr, new_minor, minor_id); + +@@ -267,8 +370,30 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, + printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); + goto err_g2; + } ++ ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); ++ if (ret) ++ goto err_g2; ++ } ++ + if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) +- goto err_g2; ++ goto err_g3; ++ ++ if (dev->driver->load) { ++ ret = dev->driver->load(dev, ent->driver_data); ++ if (ret) ++ goto err_g3; ++ } ++ ++ /* setup the grouping for the legacy output */ ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group); ++ if (ret) ++ goto err_g3; ++ } ++ ++ list_add_tail(&dev->driver_item, &driver->device_list); + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + driver->name, driver->major, driver->minor, driver->patchlevel, +@@ -276,6 +401,8 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, + + return 0; + ++err_g3: ++ drm_put_minor(&dev->primary); + err_g2: + pci_disable_device(pdev); + err_g1: +@@ -297,11 +424,6 @@ int drm_put_dev(struct drm_device * dev) + { + DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); + +- if (dev->unique) { +- drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); +- dev->unique = NULL; +- dev->unique_len = 0; +- } + if (dev->devname) { + drm_free(dev->devname, strlen(dev->devname) + 1, + DRM_MEM_DRIVER); +diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c +index 1611b9b..65d72d0 100644 +--- a/drivers/gpu/drm/drm_sysfs.c ++++ b/drivers/gpu/drm/drm_sysfs.c +@@ -20,6 +20,7 @@ + #include "drmP.h" + + #define to_drm_minor(d) container_of(d, struct drm_minor, kdev) ++#define to_drm_connector(d) container_of(d, struct drm_connector, kdev) + + /** + * 
drm_sysfs_suspend - DRM class suspend hook +@@ -34,7 +35,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state) + struct drm_minor *drm_minor = to_drm_minor(dev); + struct drm_device *drm_dev = drm_minor->dev; + +- if (drm_dev->driver->suspend) ++ if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->suspend) + return drm_dev->driver->suspend(drm_dev, state); + + return 0; +@@ -52,7 +53,7 @@ static int drm_sysfs_resume(struct device *dev) + struct drm_minor *drm_minor = to_drm_minor(dev); + struct drm_device *drm_dev = drm_minor->dev; + +- if (drm_dev->driver->resume) ++ if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->resume) + return drm_dev->driver->resume(drm_dev); + + return 0; +@@ -144,6 +145,323 @@ static void drm_sysfs_device_release(struct device *dev) + return; + } + ++/* ++ * Connector properties ++ */ ++static ssize_t status_show(struct device *device, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_connector *connector = to_drm_connector(device); ++ enum drm_connector_status status; ++ ++ status = connector->funcs->detect(connector); ++ return snprintf(buf, PAGE_SIZE, "%s", ++ drm_get_connector_status_name(status)); ++} ++ ++static ssize_t dpms_show(struct device *device, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_connector *connector = to_drm_connector(device); ++ struct drm_device *dev = connector->dev; ++ uint64_t dpms_status; ++ int ret; ++ ++ ret = drm_connector_property_get_value(connector, ++ dev->mode_config.dpms_property, ++ &dpms_status); ++ if (ret) ++ return 0; ++ ++ return snprintf(buf, PAGE_SIZE, "%s", ++ drm_get_dpms_name((int)dpms_status)); ++} ++ ++static ssize_t enabled_show(struct device *device, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_connector *connector = to_drm_connector(device); ++ ++ return snprintf(buf, PAGE_SIZE, connector->encoder ? 
"enabled" : ++ "disabled"); ++} ++ ++static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr, ++ char *buf, loff_t off, size_t count) ++{ ++ struct device *connector_dev = container_of(kobj, struct device, kobj); ++ struct drm_connector *connector = to_drm_connector(connector_dev); ++ unsigned char *edid; ++ size_t size; ++ ++ if (!connector->edid_blob_ptr) ++ return 0; ++ ++ edid = connector->edid_blob_ptr->data; ++ size = connector->edid_blob_ptr->length; ++ if (!edid) ++ return 0; ++ ++ if (off >= size) ++ return 0; ++ ++ if (off + count > size) ++ count = size - off; ++ memcpy(buf, edid + off, count); ++ ++ return count; ++} ++ ++static ssize_t modes_show(struct device *device, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_connector *connector = to_drm_connector(device); ++ struct drm_display_mode *mode; ++ int written = 0; ++ ++ list_for_each_entry(mode, &connector->modes, head) { ++ written += snprintf(buf + written, PAGE_SIZE - written, "%s\n", ++ mode->name); ++ } ++ ++ return written; ++} ++ ++static ssize_t subconnector_show(struct device *device, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_connector *connector = to_drm_connector(device); ++ struct drm_device *dev = connector->dev; ++ struct drm_property *prop = NULL; ++ uint64_t subconnector; ++ int is_tv = 0; ++ int ret; ++ ++ switch (connector->connector_type) { ++ case DRM_MODE_CONNECTOR_DVII: ++ prop = dev->mode_config.dvi_i_subconnector_property; ++ break; ++ case DRM_MODE_CONNECTOR_Composite: ++ case DRM_MODE_CONNECTOR_SVIDEO: ++ case DRM_MODE_CONNECTOR_Component: ++ prop = dev->mode_config.tv_subconnector_property; ++ is_tv = 1; ++ break; ++ default: ++ DRM_ERROR("Wrong connector type for this property\n"); ++ return 0; ++ } ++ ++ if (!prop) { ++ DRM_ERROR("Unable to find subconnector property\n"); ++ return 0; ++ } ++ ++ ret = drm_connector_property_get_value(connector, prop, &subconnector); ++ if (ret) ++ return 0; ++ ++ return 
snprintf(buf, PAGE_SIZE, "%s", is_tv ? ++ drm_get_tv_subconnector_name((int)subconnector) : ++ drm_get_dvi_i_subconnector_name((int)subconnector)); ++} ++ ++static ssize_t select_subconnector_show(struct device *device, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_connector *connector = to_drm_connector(device); ++ struct drm_device *dev = connector->dev; ++ struct drm_property *prop = NULL; ++ uint64_t subconnector; ++ int is_tv = 0; ++ int ret; ++ ++ switch (connector->connector_type) { ++ case DRM_MODE_CONNECTOR_DVII: ++ prop = dev->mode_config.dvi_i_select_subconnector_property; ++ break; ++ case DRM_MODE_CONNECTOR_Composite: ++ case DRM_MODE_CONNECTOR_SVIDEO: ++ case DRM_MODE_CONNECTOR_Component: ++ prop = dev->mode_config.tv_select_subconnector_property; ++ is_tv = 1; ++ break; ++ default: ++ DRM_ERROR("Wrong connector type for this property\n"); ++ return 0; ++ } ++ ++ if (!prop) { ++ DRM_ERROR("Unable to find select subconnector property\n"); ++ return 0; ++ } ++ ++ ret = drm_connector_property_get_value(connector, prop, &subconnector); ++ if (ret) ++ return 0; ++ ++ return snprintf(buf, PAGE_SIZE, "%s", is_tv ? ++ drm_get_tv_select_name((int)subconnector) : ++ drm_get_dvi_i_select_name((int)subconnector)); ++} ++ ++static struct device_attribute connector_attrs[] = { ++ __ATTR_RO(status), ++ __ATTR_RO(enabled), ++ __ATTR_RO(dpms), ++ __ATTR_RO(modes), ++}; ++ ++/* These attributes are for both DVI-I connectors and all types of tv-out. 
*/ ++static struct device_attribute connector_attrs_opt1[] = { ++ __ATTR_RO(subconnector), ++ __ATTR_RO(select_subconnector), ++}; ++ ++static struct bin_attribute edid_attr = { ++ .attr.name = "edid", ++ .size = 128, ++ .read = edid_show, ++}; ++ ++/** ++ * drm_sysfs_connector_add - add an connector to sysfs ++ * @connector: connector to add ++ * ++ * Create an connector device in sysfs, along with its associated connector ++ * properties (so far, connection status, dpms, mode list & edid) and ++ * generate a hotplug event so userspace knows there's a new connector ++ * available. ++ * ++ * Note: ++ * This routine should only be called *once* for each DRM minor registered. ++ * A second call for an already registered device will trigger the BUG_ON ++ * below. ++ */ ++int drm_sysfs_connector_add(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ int ret = 0, i, j; ++ ++ /* We shouldn't get called more than once for the same connector */ ++ BUG_ON(device_is_registered(&connector->kdev)); ++ ++ connector->kdev.parent = &dev->primary->kdev; ++ connector->kdev.class = drm_class; ++ connector->kdev.release = drm_sysfs_device_release; ++ ++ DRM_DEBUG("adding \"%s\" to sysfs\n", ++ drm_get_connector_name(connector)); ++ ++ snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s", ++ dev->primary->index, drm_get_connector_name(connector)); ++ ret = device_register(&connector->kdev); ++ ++ if (ret) { ++ DRM_ERROR("failed to register connector device: %d\n", ret); ++ goto out; ++ } ++ ++ /* Standard attributes */ ++ ++ for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) { ++ ret = device_create_file(&connector->kdev, &connector_attrs[i]); ++ if (ret) ++ goto err_out_files; ++ } ++ ++ /* Optional attributes */ ++ /* ++ * In the long run it maybe a good idea to make one set of ++ * optionals per connector type. 
++ */
++ switch (connector->connector_type) {
++ case DRM_MODE_CONNECTOR_DVII:
++ case DRM_MODE_CONNECTOR_Composite:
++ case DRM_MODE_CONNECTOR_SVIDEO:
++ case DRM_MODE_CONNECTOR_Component:
++ for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
++ ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
++ if (ret)
++ goto err_out_files;
++ }
++ break;
++ default:
++ break;
++ }
++
++ ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
++ if (ret)
++ goto err_out_files;
++
++ /* Let userspace know we have a new connector */
++ drm_sysfs_hotplug_event(dev);
++
++ return 0;
++
++err_out_files:
++ if (i > 0)
++ for (j = 0; j < i; j++)
++ device_remove_file(&connector->kdev,
++ &connector_attrs[j]);
++ device_unregister(&connector->kdev);
++
++out:
++ return ret;
++}
++EXPORT_SYMBOL(drm_sysfs_connector_add);
++
++/**
++ * drm_sysfs_connector_remove - remove an connector device from sysfs
++ * @connector: connector to remove
++ *
++ * Remove @connector and its associated attributes from sysfs. Note that
++ * the device model core will take care of sending the "remove" uevent
++ * at this time, so we don't need to do it.
++ *
++ * Note:
++ * This routine should only be called if the connector was previously
++ * successfully registered. If @connector hasn't been registered yet,
++ * you'll likely see a panic somewhere deep in sysfs code when called.
++ */
++void drm_sysfs_connector_remove(struct drm_connector *connector)
++{
++ int i;
++
++ DRM_DEBUG("removing \"%s\" from sysfs\n",
++ drm_get_connector_name(connector));
++
++ for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
++ device_remove_file(&connector->kdev, &connector_attrs[i]);
++ sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
++ device_unregister(&connector->kdev);
++}
++EXPORT_SYMBOL(drm_sysfs_connector_remove);
++
++/**
++ * drm_sysfs_hotplug_event - generate a DRM uevent
++ * @dev: DRM device
++ *
++ * Send a uevent for the DRM device specified by @dev. 
Currently we only ++ * set HOTPLUG=1 in the uevent environment, but this could be expanded to ++ * deal with other types of events. ++ */ ++void drm_sysfs_hotplug_event(struct drm_device *dev) ++{ ++ char *event_string = "HOTPLUG=1"; ++ char *envp[] = { event_string, NULL }; ++ ++ DRM_DEBUG("generating hotplug event\n"); ++ ++ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); ++} ++ + /** + * drm_sysfs_device_add - adds a class device to sysfs for a character driver + * @dev: DRM device to be added +@@ -163,7 +481,12 @@ int drm_sysfs_device_add(struct drm_minor *minor) + minor->kdev.class = drm_class; + minor->kdev.release = drm_sysfs_device_release; + minor->kdev.devt = minor->device; +- minor_str = "card%d"; ++ if (minor->type == DRM_MINOR_CONTROL) ++ minor_str = "controlD%d"; ++ else if (minor->type == DRM_MINOR_RENDER) ++ minor_str = "renderD%d"; ++ else ++ minor_str = "card%d"; + + snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index); + +diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c +index c234c6f..3ffae02 100644 +--- a/drivers/gpu/drm/drm_vm.c ++++ b/drivers/gpu/drm/drm_vm.c +@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) + dmah.size = map->size; + __drm_pci_free(dev, &dmah); + break; ++ case _DRM_GEM: ++ DRM_ERROR("tried to rmmap GEM object\n"); ++ break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + } +@@ -399,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = { + * Create a new drm_vma_entry structure as the \p vma private data entry and + * add it to drm_device::vmalist. + */ +-static void drm_vm_open_locked(struct vm_area_struct *vma) ++void drm_vm_open_locked(struct vm_area_struct *vma) + { + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->minor->dev; +@@ -540,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); + * according to the mapping type and remaps the pages. Finally sets the file + * pointer and calls vm_open(). 
+ */ +-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) ++int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) + { + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->minor->dev; +diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile +index d8fb5d8..dd57a5b 100644 +--- a/drivers/gpu/drm/i915/Makefile ++++ b/drivers/gpu/drm/i915/Makefile +@@ -8,7 +8,22 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ + i915_gem.o \ + i915_gem_debug.o \ + i915_gem_proc.o \ +- i915_gem_tiling.o ++ i915_gem_tiling.o \ ++ intel_display.o \ ++ intel_crt.o \ ++ intel_lvds.o \ ++ intel_bios.o \ ++ intel_sdvo.o \ ++ intel_modes.o \ ++ intel_i2c.o \ ++ intel_fb.o \ ++ intel_tv.o \ ++ intel_dvo.o \ ++ dvo_ch7xxx.o \ ++ dvo_ch7017.o \ ++ dvo_ivch.o \ ++ dvo_tfp410.o \ ++ dvo_sil164.o + + i915-$(CONFIG_ACPI) += i915_opregion.o + i915-$(CONFIG_COMPAT) += i915_ioc32.o +diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h +new file mode 100644 +index 0000000..e80866c +--- /dev/null ++++ b/drivers/gpu/drm/i915/dvo.h +@@ -0,0 +1,151 @@ ++/* ++ * Copyright © 2006 Eric Anholt ++ * ++ * Permission to use, copy, modify, distribute, and sell this software and its ++ * documentation for any purpose is hereby granted without fee, provided that ++ * the above copyright notice appear in all copies and that both that copyright ++ * notice and this permission notice appear in supporting documentation, and ++ * that the name of the copyright holders not be used in advertising or ++ * publicity pertaining to distribution of the software without specific, ++ * written prior permission. The copyright holders make no representations ++ * about the suitability of this software for any purpose. It is provided "as ++ * is" without express or implied warranty. 
++ * ++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, ++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO ++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR ++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, ++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER ++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE ++ * OF THIS SOFTWARE. ++ */ ++ ++#ifndef _INTEL_DVO_H ++#define _INTEL_DVO_H ++ ++#include ++#include "drmP.h" ++#include "drm.h" ++#include "drm_crtc.h" ++#include "intel_drv.h" ++ ++struct intel_dvo_device { ++ char *name; ++ int type; ++ /* DVOA/B/C output register */ ++ u32 dvo_reg; ++ /* GPIO register used for i2c bus to control this device */ ++ u32 gpio; ++ int slave_addr; ++ struct intel_i2c_chan *i2c_bus; ++ ++ const struct intel_dvo_dev_ops *dev_ops; ++ void *dev_priv; ++ ++ struct drm_display_mode *panel_fixed_mode; ++ bool panel_wants_dither; ++}; ++ ++struct intel_dvo_dev_ops { ++ /* ++ * Initialize the device at startup time. ++ * Returns NULL if the device does not exist. ++ */ ++ bool (*init)(struct intel_dvo_device *dvo, ++ struct intel_i2c_chan *i2cbus); ++ ++ /* ++ * Called to allow the output a chance to create properties after the ++ * RandR objects have been created. ++ */ ++ void (*create_resources)(struct intel_dvo_device *dvo); ++ ++ /* ++ * Turn on/off output or set intermediate power levels if available. ++ * ++ * Unsupported intermediate modes drop to the lower power setting. ++ * If the mode is DPMSModeOff, the output must be disabled, ++ * as the DPLL may be disabled afterwards. ++ */ ++ void (*dpms)(struct intel_dvo_device *dvo, int mode); ++ ++ /* ++ * Saves the output's state for restoration on VT switch. ++ */ ++ void (*save)(struct intel_dvo_device *dvo); ++ ++ /* ++ * Restore's the output's state at VT switch. 
++ */ ++ void (*restore)(struct intel_dvo_device *dvo); ++ ++ /* ++ * Callback for testing a video mode for a given output. ++ * ++ * This function should only check for cases where a mode can't ++ * be supported on the output specifically, and not represent ++ * generic CRTC limitations. ++ * ++ * \return MODE_OK if the mode is valid, or another MODE_* otherwise. ++ */ ++ int (*mode_valid)(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode); ++ ++ /* ++ * Callback to adjust the mode to be set in the CRTC. ++ * ++ * This allows an output to adjust the clock or even the entire set of ++ * timings, which is used for panels with fixed timings or for ++ * buses with clock limitations. ++ */ ++ bool (*mode_fixup)(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode); ++ ++ /* ++ * Callback for preparing mode changes on an output ++ */ ++ void (*prepare)(struct intel_dvo_device *dvo); ++ ++ /* ++ * Callback for committing mode changes on an output ++ */ ++ void (*commit)(struct intel_dvo_device *dvo); ++ ++ /* ++ * Callback for setting up a video mode after fixups have been made. ++ * ++ * This is only called while the output is disabled. The dpms callback ++ * must be all that's necessary for the output, to turn the output on ++ * after this function is called. ++ */ ++ void (*mode_set)(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode); ++ ++ /* ++ * Probe for a connected output, and return detect_status. ++ */ ++ enum drm_connector_status (*detect)(struct intel_dvo_device *dvo); ++ ++ /** ++ * Query the device for the modes it provides. ++ * ++ * This function may also update MonInfo, mm_width, and mm_height. ++ * ++ * \return singly-linked list of modes or NULL if no modes found. 
++ */ ++ struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo); ++ ++ /** ++ * Clean up driver-specific bits of the output ++ */ ++ void (*destroy) (struct intel_dvo_device *dvo); ++ ++ /** ++ * Debugging hook to dump device registers to log file ++ */ ++ void (*dump_regs)(struct intel_dvo_device *dvo); ++}; ++ ++#endif /* _INTEL_DVO_H */ +diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c +new file mode 100644 +index 0000000..03d4b49 +--- /dev/null ++++ b/drivers/gpu/drm/i915/dvo_ch7017.c +@@ -0,0 +1,454 @@ ++/* ++ * Copyright © 2006 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Eric Anholt ++ * ++ */ ++ ++#include "dvo.h" ++ ++#define CH7017_TV_DISPLAY_MODE 0x00 ++#define CH7017_FLICKER_FILTER 0x01 ++#define CH7017_VIDEO_BANDWIDTH 0x02 ++#define CH7017_TEXT_ENHANCEMENT 0x03 ++#define CH7017_START_ACTIVE_VIDEO 0x04 ++#define CH7017_HORIZONTAL_POSITION 0x05 ++#define CH7017_VERTICAL_POSITION 0x06 ++#define CH7017_BLACK_LEVEL 0x07 ++#define CH7017_CONTRAST_ENHANCEMENT 0x08 ++#define CH7017_TV_PLL 0x09 ++#define CH7017_TV_PLL_M 0x0a ++#define CH7017_TV_PLL_N 0x0b ++#define CH7017_SUB_CARRIER_0 0x0c ++#define CH7017_CIV_CONTROL 0x10 ++#define CH7017_CIV_0 0x11 ++#define CH7017_CHROMA_BOOST 0x14 ++#define CH7017_CLOCK_MODE 0x1c ++#define CH7017_INPUT_CLOCK 0x1d ++#define CH7017_GPIO_CONTROL 0x1e ++#define CH7017_INPUT_DATA_FORMAT 0x1f ++#define CH7017_CONNECTION_DETECT 0x20 ++#define CH7017_DAC_CONTROL 0x21 ++#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22 ++#define CH7017_DEFEAT_VSYNC 0x47 ++#define CH7017_TEST_PATTERN 0x48 ++ ++#define CH7017_POWER_MANAGEMENT 0x49 ++/** Enables the TV output path. 
*/ ++#define CH7017_TV_EN (1 << 0) ++#define CH7017_DAC0_POWER_DOWN (1 << 1) ++#define CH7017_DAC1_POWER_DOWN (1 << 2) ++#define CH7017_DAC2_POWER_DOWN (1 << 3) ++#define CH7017_DAC3_POWER_DOWN (1 << 4) ++/** Powers down the TV out block, and DAC0-3 */ ++#define CH7017_TV_POWER_DOWN_EN (1 << 5) ++ ++#define CH7017_VERSION_ID 0x4a ++ ++#define CH7017_DEVICE_ID 0x4b ++#define CH7017_DEVICE_ID_VALUE 0x1b ++#define CH7018_DEVICE_ID_VALUE 0x1a ++#define CH7019_DEVICE_ID_VALUE 0x19 ++ ++#define CH7017_XCLK_D2_ADJUST 0x53 ++#define CH7017_UP_SCALER_COEFF_0 0x55 ++#define CH7017_UP_SCALER_COEFF_1 0x56 ++#define CH7017_UP_SCALER_COEFF_2 0x57 ++#define CH7017_UP_SCALER_COEFF_3 0x58 ++#define CH7017_UP_SCALER_COEFF_4 0x59 ++#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a ++#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b ++#define CH7017_GPIO_INVERT 0x5c ++#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d ++#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e ++ ++#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f ++/**< Low bits of horizontal active pixel input */ ++ ++#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60 ++/** High bits of horizontal active pixel input */ ++#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0) ++/** High bits of vertical active line output */ ++#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3) ++ ++#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61 ++/**< Low bits of vertical active line output */ ++ ++#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62 ++/**< Low bits of horizontal active pixel output */ ++ ++#define CH7017_LVDS_POWER_DOWN 0x63 ++/** High bits of horizontal active pixel output */ ++#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0) ++/** Enables the LVDS power down state transition */ ++#define CH7017_LVDS_POWER_DOWN_EN (1 << 6) ++/** Enables the LVDS upscaler */ ++#define CH7017_LVDS_UPSCALER_EN (1 << 7) ++#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08 ++ ++#define CH7017_LVDS_ENCODING 0x64 ++#define CH7017_LVDS_DITHER_2D (1 << 2) ++#define CH7017_LVDS_DITHER_DIS 
(1 << 3) ++#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4) ++#define CH7017_LVDS_24_BIT (1 << 5) ++ ++#define CH7017_LVDS_ENCODING_2 0x65 ++ ++#define CH7017_LVDS_PLL_CONTROL 0x66 ++/** Enables the LVDS panel output path */ ++#define CH7017_LVDS_PANEN (1 << 0) ++/** Enables the LVDS panel backlight */ ++#define CH7017_LVDS_BKLEN (1 << 3) ++ ++#define CH7017_POWER_SEQUENCING_T1 0x67 ++#define CH7017_POWER_SEQUENCING_T2 0x68 ++#define CH7017_POWER_SEQUENCING_T3 0x69 ++#define CH7017_POWER_SEQUENCING_T4 0x6a ++#define CH7017_POWER_SEQUENCING_T5 0x6b ++#define CH7017_GPIO_DRIVER_TYPE 0x6c ++#define CH7017_GPIO_DATA 0x6d ++#define CH7017_GPIO_DIRECTION_CONTROL 0x6e ++ ++#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71 ++# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4 ++# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0 ++# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80 ++ ++#define CH7017_LVDS_PLL_VCO_CONTROL 0x72 ++# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80 ++# define CH7017_LVDS_PLL_VCO_SHIFT 4 ++# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0 ++ ++#define CH7017_OUTPUTS_ENABLE 0x73 ++# define CH7017_CHARGE_PUMP_LOW 0x0 ++# define CH7017_CHARGE_PUMP_HIGH 0x3 ++# define CH7017_LVDS_CHANNEL_A (1 << 3) ++# define CH7017_LVDS_CHANNEL_B (1 << 4) ++# define CH7017_TV_DAC_A (1 << 5) ++# define CH7017_TV_DAC_B (1 << 6) ++# define CH7017_DDC_SELECT_DC2 (1 << 7) ++ ++#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74 ++#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75 ++#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76 ++ ++#define CH7017_LVDS_CONTROL_2 0x78 ++# define CH7017_LOOP_FILTER_SHIFT 5 ++# define CH7017_PHASE_DETECTOR_SHIFT 0 ++ ++#define CH7017_BANG_LIMIT_CONTROL 0x7f ++ ++struct ch7017_priv { ++ uint8_t save_hapi; ++ uint8_t save_vali; ++ uint8_t save_valo; ++ uint8_t save_ailo; ++ uint8_t save_lvds_pll_vco; ++ uint8_t save_feedback_div; ++ uint8_t save_lvds_control_2; ++ uint8_t save_outputs_enable; ++ uint8_t save_lvds_power_down; ++ uint8_t save_power_management; ++}; ++ 
++static void ch7017_dump_regs(struct intel_dvo_device *dvo); ++static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); ++ ++static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) ++{ ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ u8 out_buf[2]; ++ u8 in_buf[2]; ++ ++ struct i2c_msg msgs[] = { ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = 0, ++ .len = 1, ++ .buf = out_buf, ++ }, ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = I2C_M_RD, ++ .len = 1, ++ .buf = in_buf, ++ } ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = 0; ++ ++ if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { ++ *val= in_buf[0]; ++ return true; ++ }; ++ ++ return false; ++} ++ ++static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) ++{ ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ uint8_t out_buf[2]; ++ struct i2c_msg msg = { ++ .addr = i2cbus->slave_addr, ++ .flags = 0, ++ .len = 2, ++ .buf = out_buf, ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = val; ++ ++ if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) ++ return true; ++ ++ return false; ++} ++ ++/** Probes for a CH7017 on the given bus and slave address. 
*/ ++static bool ch7017_init(struct intel_dvo_device *dvo, ++ struct intel_i2c_chan *i2cbus) ++{ ++ struct ch7017_priv *priv; ++ uint8_t val; ++ ++ priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL); ++ if (priv == NULL) ++ return false; ++ ++ dvo->i2c_bus = i2cbus; ++ dvo->i2c_bus->slave_addr = dvo->slave_addr; ++ dvo->dev_priv = priv; ++ ++ if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) ++ goto fail; ++ ++ if (val != CH7017_DEVICE_ID_VALUE && ++ val != CH7018_DEVICE_ID_VALUE && ++ val != CH7019_DEVICE_ID_VALUE) { ++ DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", ++ val, i2cbus->adapter.name,i2cbus->slave_addr); ++ goto fail; ++ } ++ ++ return true; ++fail: ++ kfree(priv); ++ return false; ++} ++ ++static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo) ++{ ++ return connector_status_unknown; ++} ++ ++static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode) ++{ ++ if (mode->clock > 160000) ++ return MODE_CLOCK_HIGH; ++ ++ return MODE_OK; ++} ++ ++static void ch7017_mode_set(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ uint8_t lvds_pll_feedback_div, lvds_pll_vco_control; ++ uint8_t outputs_enable, lvds_control_2, lvds_power_down; ++ uint8_t horizontal_active_pixel_input; ++ uint8_t horizontal_active_pixel_output, vertical_active_line_output; ++ uint8_t active_input_line_output; ++ ++ DRM_DEBUG("Registers before mode setting\n"); ++ ch7017_dump_regs(dvo); ++ ++ /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/ ++ if (mode->clock < 100000) { ++ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW; ++ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | ++ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | ++ (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); ++ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | ++ (2 << CH7017_LVDS_PLL_VCO_SHIFT) | ++ (3 << 
CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); ++ lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) | ++ (0 << CH7017_PHASE_DETECTOR_SHIFT); ++ } else { ++ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH; ++ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | ++ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | ++ (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); ++ lvds_pll_feedback_div = 35; ++ lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) | ++ (0 << CH7017_PHASE_DETECTOR_SHIFT); ++ if (1) { /* XXX: dual channel panel detection. Assume yes for now. */ ++ outputs_enable |= CH7017_LVDS_CHANNEL_B; ++ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | ++ (2 << CH7017_LVDS_PLL_VCO_SHIFT) | ++ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); ++ } else { ++ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | ++ (1 << CH7017_LVDS_PLL_VCO_SHIFT) | ++ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); ++ } ++ } ++ ++ horizontal_active_pixel_input = mode->hdisplay & 0x00ff; ++ ++ vertical_active_line_output = mode->vdisplay & 0x00ff; ++ horizontal_active_pixel_output = mode->hdisplay & 0x00ff; ++ ++ active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) | ++ (((mode->vdisplay & 0x0700) >> 8) << 3); ++ ++ lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | ++ (mode->hdisplay & 0x0700) >> 8; ++ ++ ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); ++ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, ++ horizontal_active_pixel_input); ++ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, ++ horizontal_active_pixel_output); ++ ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, ++ vertical_active_line_output); ++ ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, ++ active_input_line_output); ++ ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control); ++ ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div); ++ ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2); ++ ch7017_write(dvo, 
CH7017_OUTPUTS_ENABLE, outputs_enable); ++ ++ /* Turn the LVDS back on with new settings. */ ++ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down); ++ ++ DRM_DEBUG("Registers after mode setting\n"); ++ ch7017_dump_regs(dvo); ++} ++ ++/* set the CH7017 power state */ ++static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) ++{ ++ uint8_t val; ++ ++ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); ++ ++ /* Turn off TV/VGA, and never turn it on since we don't support it. */ ++ ch7017_write(dvo, CH7017_POWER_MANAGEMENT, ++ CH7017_DAC0_POWER_DOWN | ++ CH7017_DAC1_POWER_DOWN | ++ CH7017_DAC2_POWER_DOWN | ++ CH7017_DAC3_POWER_DOWN | ++ CH7017_TV_POWER_DOWN_EN); ++ ++ if (mode == DRM_MODE_DPMS_ON) { ++ /* Turn on the LVDS */ ++ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, ++ val & ~CH7017_LVDS_POWER_DOWN_EN); ++ } else { ++ /* Turn off the LVDS */ ++ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, ++ val | CH7017_LVDS_POWER_DOWN_EN); ++ } ++ ++ /* XXX: Should actually wait for update power status somehow */ ++ udelay(20000); ++} ++ ++static void ch7017_dump_regs(struct intel_dvo_device *dvo) ++{ ++ uint8_t val; ++ ++#define DUMP(reg) \ ++do { \ ++ ch7017_read(dvo, reg, &val); \ ++ DRM_DEBUG(#reg ": %02x\n", val); \ ++} while (0) ++ ++ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT); ++ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT); ++ DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT); ++ DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT); ++ DUMP(CH7017_LVDS_PLL_VCO_CONTROL); ++ DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV); ++ DUMP(CH7017_LVDS_CONTROL_2); ++ DUMP(CH7017_OUTPUTS_ENABLE); ++ DUMP(CH7017_LVDS_POWER_DOWN); ++} ++ ++static void ch7017_save(struct intel_dvo_device *dvo) ++{ ++ struct ch7017_priv *priv = dvo->dev_priv; ++ ++ ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi); ++ ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo); ++ ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo); ++ ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, 
&priv->save_lvds_pll_vco); ++ ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div); ++ ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2); ++ ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable); ++ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down); ++ ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management); ++} ++ ++static void ch7017_restore(struct intel_dvo_device *dvo) ++{ ++ struct ch7017_priv *priv = dvo->dev_priv; ++ ++ /* Power down before changing mode */ ++ ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); ++ ++ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi); ++ ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo); ++ ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo); ++ ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco); ++ ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div); ++ ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2); ++ ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable); ++ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down); ++ ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management); ++} ++ ++static void ch7017_destroy(struct intel_dvo_device *dvo) ++{ ++ struct ch7017_priv *priv = dvo->dev_priv; ++ ++ if (priv) { ++ kfree(priv); ++ dvo->dev_priv = NULL; ++ } ++} ++ ++struct intel_dvo_dev_ops ch7017_ops = { ++ .init = ch7017_init, ++ .detect = ch7017_detect, ++ .mode_valid = ch7017_mode_valid, ++ .mode_set = ch7017_mode_set, ++ .dpms = ch7017_dpms, ++ .dump_regs = ch7017_dump_regs, ++ .save = ch7017_save, ++ .restore = ch7017_restore, ++ .destroy = ch7017_destroy, ++}; +diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c +new file mode 100644 +index 0000000..d2fd95d +--- /dev/null ++++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c +@@ -0,0 +1,368 @@ 
++/************************************************************************** ++ ++Copyright © 2006 Dave Airlie ++ ++All Rights Reserved. ++ ++Permission is hereby granted, free of charge, to any person obtaining a ++copy of this software and associated documentation files (the ++"Software"), to deal in the Software without restriction, including ++without limitation the rights to use, copy, modify, merge, publish, ++distribute, sub license, and/or sell copies of the Software, and to ++permit persons to whom the Software is furnished to do so, subject to ++the following conditions: ++ ++The above copyright notice and this permission notice (including the ++next paragraph) shall be included in all copies or substantial portions ++of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ ++**************************************************************************/ ++ ++#include "dvo.h" ++ ++#define CH7xxx_REG_VID 0x4a ++#define CH7xxx_REG_DID 0x4b ++ ++#define CH7011_VID 0x83 /* 7010 as well */ ++#define CH7009A_VID 0x84 ++#define CH7009B_VID 0x85 ++#define CH7301_VID 0x95 ++ ++#define CH7xxx_VID 0x84 ++#define CH7xxx_DID 0x17 ++ ++#define CH7xxx_NUM_REGS 0x4c ++ ++#define CH7xxx_CM 0x1c ++#define CH7xxx_CM_XCM (1<<0) ++#define CH7xxx_CM_MCP (1<<2) ++#define CH7xxx_INPUT_CLOCK 0x1d ++#define CH7xxx_GPIO 0x1e ++#define CH7xxx_GPIO_HPIR (1<<3) ++#define CH7xxx_IDF 0x1f ++ ++#define CH7xxx_IDF_HSP (1<<3) ++#define CH7xxx_IDF_VSP (1<<4) ++ ++#define CH7xxx_CONNECTION_DETECT 0x20 ++#define CH7xxx_CDET_DVI (1<<5) ++ ++#define CH7301_DAC_CNTL 0x21 ++#define CH7301_HOTPLUG 0x23 ++#define CH7xxx_TCTL 0x31 ++#define CH7xxx_TVCO 0x32 ++#define CH7xxx_TPCP 0x33 ++#define CH7xxx_TPD 0x34 ++#define CH7xxx_TPVT 0x35 ++#define CH7xxx_TLPF 0x36 ++#define CH7xxx_TCT 0x37 ++#define CH7301_TEST_PATTERN 0x48 ++ ++#define CH7xxx_PM 0x49 ++#define CH7xxx_PM_FPD (1<<0) ++#define CH7301_PM_DACPD0 (1<<1) ++#define CH7301_PM_DACPD1 (1<<2) ++#define CH7301_PM_DACPD2 (1<<3) ++#define CH7xxx_PM_DVIL (1<<6) ++#define CH7xxx_PM_DVIP (1<<7) ++ ++#define CH7301_SYNC_POLARITY 0x56 ++#define CH7301_SYNC_RGB_YUV (1<<0) ++#define CH7301_SYNC_POL_DVI (1<<5) ++ ++/** @file ++ * driver for the Chrontel 7xxx DVI chip over DVO. 
++ */ ++ ++static struct ch7xxx_id_struct { ++ uint8_t vid; ++ char *name; ++} ch7xxx_ids[] = { ++ { CH7011_VID, "CH7011" }, ++ { CH7009A_VID, "CH7009A" }, ++ { CH7009B_VID, "CH7009B" }, ++ { CH7301_VID, "CH7301" }, ++}; ++ ++struct ch7xxx_reg_state { ++ uint8_t regs[CH7xxx_NUM_REGS]; ++}; ++ ++struct ch7xxx_priv { ++ bool quiet; ++ ++ struct ch7xxx_reg_state save_reg; ++ struct ch7xxx_reg_state mode_reg; ++ uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT; ++ uint8_t save_TLPF, save_TCT, save_PM, save_IDF; ++}; ++ ++static void ch7xxx_save(struct intel_dvo_device *dvo); ++ ++static char *ch7xxx_get_id(uint8_t vid) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) { ++ if (ch7xxx_ids[i].vid == vid) ++ return ch7xxx_ids[i].name; ++ } ++ ++ return NULL; ++} ++ ++/** Reads an 8 bit register */ ++static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) ++{ ++ struct ch7xxx_priv *ch7xxx= dvo->dev_priv; ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ u8 out_buf[2]; ++ u8 in_buf[2]; ++ ++ struct i2c_msg msgs[] = { ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = 0, ++ .len = 1, ++ .buf = out_buf, ++ }, ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = I2C_M_RD, ++ .len = 1, ++ .buf = in_buf, ++ } ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = 0; ++ ++ if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { ++ *ch = in_buf[0]; ++ return true; ++ }; ++ ++ if (!ch7xxx->quiet) { ++ DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", ++ addr, i2cbus->adapter.name, i2cbus->slave_addr); ++ } ++ return false; ++} ++ ++/** Writes an 8 bit register */ ++static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) ++{ ++ struct ch7xxx_priv *ch7xxx = dvo->dev_priv; ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ uint8_t out_buf[2]; ++ struct i2c_msg msg = { ++ .addr = i2cbus->slave_addr, ++ .flags = 0, ++ .len = 2, ++ .buf = out_buf, ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = ch; ++ ++ if 
(i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) ++ return true; ++ ++ if (!ch7xxx->quiet) { ++ DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", ++ addr, i2cbus->adapter.name, i2cbus->slave_addr); ++ } ++ ++ return false; ++} ++ ++static bool ch7xxx_init(struct intel_dvo_device *dvo, ++ struct intel_i2c_chan *i2cbus) ++{ ++ /* this will detect the CH7xxx chip on the specified i2c bus */ ++ struct ch7xxx_priv *ch7xxx; ++ uint8_t vendor, device; ++ char *name; ++ ++ ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL); ++ if (ch7xxx == NULL) ++ return false; ++ ++ dvo->i2c_bus = i2cbus; ++ dvo->i2c_bus->slave_addr = dvo->slave_addr; ++ dvo->dev_priv = ch7xxx; ++ ch7xxx->quiet = true; ++ ++ if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor)) ++ goto out; ++ ++ name = ch7xxx_get_id(vendor); ++ if (!name) { ++ DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", ++ vendor, i2cbus->adapter.name, i2cbus->slave_addr); ++ goto out; ++ } ++ ++ ++ if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device)) ++ goto out; ++ ++ if (device != CH7xxx_DID) { ++ DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", ++ vendor, i2cbus->adapter.name, i2cbus->slave_addr); ++ goto out; ++ } ++ ++ ch7xxx->quiet = false; ++ DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", ++ name, vendor, device); ++ return true; ++out: ++ kfree(ch7xxx); ++ return false; ++} ++ ++static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo) ++{ ++ uint8_t cdet, orig_pm, pm; ++ ++ ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm); ++ ++ pm = orig_pm; ++ pm &= ~CH7xxx_PM_FPD; ++ pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP; ++ ++ ch7xxx_writeb(dvo, CH7xxx_PM, pm); ++ ++ ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet); ++ ++ ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm); ++ ++ if (cdet & CH7xxx_CDET_DVI) ++ return connector_status_connected; ++ return connector_status_disconnected; ++} ++ ++static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo, ++ struct 
drm_display_mode *mode) ++{ ++ if (mode->clock > 165000) ++ return MODE_CLOCK_HIGH; ++ ++ return MODE_OK; ++} ++ ++static void ch7xxx_mode_set(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ uint8_t tvco, tpcp, tpd, tlpf, idf; ++ ++ if (mode->clock <= 65000) { ++ tvco = 0x23; ++ tpcp = 0x08; ++ tpd = 0x16; ++ tlpf = 0x60; ++ } else { ++ tvco = 0x2d; ++ tpcp = 0x06; ++ tpd = 0x26; ++ tlpf = 0xa0; ++ } ++ ++ ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00); ++ ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco); ++ ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp); ++ ch7xxx_writeb(dvo, CH7xxx_TPD, tpd); ++ ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30); ++ ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf); ++ ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00); ++ ++ ch7xxx_readb(dvo, CH7xxx_IDF, &idf); ++ ++ idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP); ++ if (mode->flags & DRM_MODE_FLAG_PHSYNC) ++ idf |= CH7xxx_IDF_HSP; ++ ++ if (mode->flags & DRM_MODE_FLAG_PVSYNC) ++ idf |= CH7xxx_IDF_HSP; ++ ++ ch7xxx_writeb(dvo, CH7xxx_IDF, idf); ++} ++ ++/* set the CH7xxx power state */ ++static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) ++{ ++ if (mode == DRM_MODE_DPMS_ON) ++ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); ++ else ++ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); ++} ++ ++static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) ++{ ++ struct ch7xxx_priv *ch7xxx = dvo->dev_priv; ++ int i; ++ ++ for (i = 0; i < CH7xxx_NUM_REGS; i++) { ++ if ((i % 8) == 0 ) ++ DRM_DEBUG("\n %02X: ", i); ++ DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]); ++ } ++} ++ ++static void ch7xxx_save(struct intel_dvo_device *dvo) ++{ ++ struct ch7xxx_priv *ch7xxx= dvo->dev_priv; ++ ++ ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL); ++ ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP); ++ ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD); ++ ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT); ++ ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF); ++ ch7xxx_readb(dvo, 
CH7xxx_PM, &ch7xxx->save_PM); ++ ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF); ++} ++ ++static void ch7xxx_restore(struct intel_dvo_device *dvo) ++{ ++ struct ch7xxx_priv *ch7xxx = dvo->dev_priv; ++ ++ ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL); ++ ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP); ++ ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD); ++ ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT); ++ ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF); ++ ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF); ++ ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM); ++} ++ ++static void ch7xxx_destroy(struct intel_dvo_device *dvo) ++{ ++ struct ch7xxx_priv *ch7xxx = dvo->dev_priv; ++ ++ if (ch7xxx) { ++ kfree(ch7xxx); ++ dvo->dev_priv = NULL; ++ } ++} ++ ++struct intel_dvo_dev_ops ch7xxx_ops = { ++ .init = ch7xxx_init, ++ .detect = ch7xxx_detect, ++ .mode_valid = ch7xxx_mode_valid, ++ .mode_set = ch7xxx_mode_set, ++ .dpms = ch7xxx_dpms, ++ .dump_regs = ch7xxx_dump_regs, ++ .save = ch7xxx_save, ++ .restore = ch7xxx_restore, ++ .destroy = ch7xxx_destroy, ++}; +diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c +new file mode 100644 +index 0000000..0c8d375 +--- /dev/null ++++ b/drivers/gpu/drm/i915/dvo_ivch.c +@@ -0,0 +1,442 @@ ++/* ++ * Copyright © 2006 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ * ++ */ ++ ++#include "dvo.h" ++ ++/* ++ * register definitions for the i82807aa. ++ * ++ * Documentation on this chipset can be found in datasheet #29069001 at ++ * intel.com. ++ */ ++ ++/* ++ * VCH Revision & GMBus Base Addr ++ */ ++#define VR00 0x00 ++# define VR00_BASE_ADDRESS_MASK 0x007f ++ ++/* ++ * Functionality Enable ++ */ ++#define VR01 0x01 ++ ++/* ++ * Enable the panel fitter ++ */ ++# define VR01_PANEL_FIT_ENABLE (1 << 3) ++/* ++ * Enables the LCD display. ++ * ++ * This must not be set while VR01_DVO_BYPASS_ENABLE is set. ++ */ ++# define VR01_LCD_ENABLE (1 << 2) ++/** Enables the DVO repeater. */ ++# define VR01_DVO_BYPASS_ENABLE (1 << 1) ++/** Enables the DVO clock */ ++# define VR01_DVO_ENABLE (1 << 0) ++ ++/* ++ * LCD Interface Format ++ */ ++#define VR10 0x10 ++/** Enables LVDS output instead of CMOS */ ++# define VR10_LVDS_ENABLE (1 << 4) ++/** Enables 18-bit LVDS output. */ ++# define VR10_INTERFACE_1X18 (0 << 2) ++/** Enables 24-bit LVDS or CMOS output */ ++# define VR10_INTERFACE_1X24 (1 << 2) ++/** Enables 2x18-bit LVDS or CMOS output. 
*/ ++# define VR10_INTERFACE_2X18 (2 << 2) ++/** Enables 2x24-bit LVDS output */ ++# define VR10_INTERFACE_2X24 (3 << 2) ++ ++/* ++ * VR20 LCD Horizontal Display Size ++ */ ++#define VR20 0x20 ++ ++/* ++ * LCD Vertical Display Size ++ */ ++#define VR21 0x20 ++ ++/* ++ * Panel power down status ++ */ ++#define VR30 0x30 ++/** Read only bit indicating that the panel is not in a safe poweroff state. */ ++# define VR30_PANEL_ON (1 << 15) ++ ++#define VR40 0x40 ++# define VR40_STALL_ENABLE (1 << 13) ++# define VR40_VERTICAL_INTERP_ENABLE (1 << 12) ++# define VR40_ENHANCED_PANEL_FITTING (1 << 11) ++# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10) ++# define VR40_AUTO_RATIO_ENABLE (1 << 9) ++# define VR40_CLOCK_GATING_ENABLE (1 << 8) ++ ++/* ++ * Panel Fitting Vertical Ratio ++ * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2 ++ */ ++#define VR41 0x41 ++ ++/* ++ * Panel Fitting Horizontal Ratio ++ * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2 ++ */ ++#define VR42 0x42 ++ ++/* ++ * Horizontal Image Size ++ */ ++#define VR43 0x43 ++ ++/* VR80 GPIO 0 ++ */ ++#define VR80 0x80 ++#define VR81 0x81 ++#define VR82 0x82 ++#define VR83 0x83 ++#define VR84 0x84 ++#define VR85 0x85 ++#define VR86 0x86 ++#define VR87 0x87 ++ ++/* VR88 GPIO 8 ++ */ ++#define VR88 0x88 ++ ++/* Graphics BIOS scratch 0 ++ */ ++#define VR8E 0x8E ++# define VR8E_PANEL_TYPE_MASK (0xf << 0) ++# define VR8E_PANEL_INTERFACE_CMOS (0 << 4) ++# define VR8E_PANEL_INTERFACE_LVDS (1 << 4) ++# define VR8E_FORCE_DEFAULT_PANEL (1 << 5) ++ ++/* Graphics BIOS scratch 1 ++ */ ++#define VR8F 0x8F ++# define VR8F_VCH_PRESENT (1 << 0) ++# define VR8F_DISPLAY_CONN (1 << 1) ++# define VR8F_POWER_MASK (0x3c) ++# define VR8F_POWER_POS (2) ++ ++ ++struct ivch_priv { ++ bool quiet; ++ ++ uint16_t width, height; ++ ++ uint16_t save_VR01; ++ uint16_t save_VR40; ++}; ++ ++ ++static void ivch_dump_regs(struct intel_dvo_device *dvo); ++ ++/** ++ * Reads a register on the ivch. 
++ * ++ * Each of the 256 registers are 16 bits long. ++ */ ++static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) ++{ ++ struct ivch_priv *priv = dvo->dev_priv; ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ u8 out_buf[1]; ++ u8 in_buf[2]; ++ ++ struct i2c_msg msgs[] = { ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = I2C_M_RD, ++ .len = 0, ++ }, ++ { ++ .addr = 0, ++ .flags = I2C_M_NOSTART, ++ .len = 1, ++ .buf = out_buf, ++ }, ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = I2C_M_RD | I2C_M_NOSTART, ++ .len = 2, ++ .buf = in_buf, ++ } ++ }; ++ ++ out_buf[0] = addr; ++ ++ if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) { ++ *data = (in_buf[1] << 8) | in_buf[0]; ++ return true; ++ }; ++ ++ if (!priv->quiet) { ++ DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", ++ addr, i2cbus->adapter.name, i2cbus->slave_addr); ++ } ++ return false; ++} ++ ++/** Writes a 16-bit register on the ivch */ ++static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) ++{ ++ struct ivch_priv *priv = dvo->dev_priv; ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ u8 out_buf[3]; ++ struct i2c_msg msg = { ++ .addr = i2cbus->slave_addr, ++ .flags = 0, ++ .len = 3, ++ .buf = out_buf, ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = data & 0xff; ++ out_buf[2] = data >> 8; ++ ++ if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) ++ return true; ++ ++ if (!priv->quiet) { ++ DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", ++ addr, i2cbus->adapter.name, i2cbus->slave_addr); ++ } ++ ++ return false; ++} ++ ++/** Probes the given bus and slave address for an ivch */ ++static bool ivch_init(struct intel_dvo_device *dvo, ++ struct intel_i2c_chan *i2cbus) ++{ ++ struct ivch_priv *priv; ++ uint16_t temp; ++ ++ priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL); ++ if (priv == NULL) ++ return false; ++ ++ dvo->i2c_bus = i2cbus; ++ dvo->i2c_bus->slave_addr = dvo->slave_addr; ++ dvo->dev_priv = priv; ++ priv->quiet = true; ++ ++ if 
(!ivch_read(dvo, VR00, &temp)) ++ goto out; ++ priv->quiet = false; ++ ++ /* Since the identification bits are probably zeroes, which doesn't seem ++ * very unique, check that the value in the base address field matches ++ * the address it's responding on. ++ */ ++ if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) { ++ DRM_DEBUG("ivch detect failed due to address mismatch " ++ "(%d vs %d)\n", ++ (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr); ++ goto out; ++ } ++ ++ ivch_read(dvo, VR20, &priv->width); ++ ivch_read(dvo, VR21, &priv->height); ++ ++ return true; ++ ++out: ++ kfree(priv); ++ return false; ++} ++ ++static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo) ++{ ++ return connector_status_connected; ++} ++ ++static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode) ++{ ++ if (mode->clock > 112000) ++ return MODE_CLOCK_HIGH; ++ ++ return MODE_OK; ++} ++ ++/** Sets the power state of the panel connected to the ivch */ ++static void ivch_dpms(struct intel_dvo_device *dvo, int mode) ++{ ++ int i; ++ uint16_t vr01, vr30, backlight; ++ ++ /* Set the new power state of the panel. 
*/ ++ if (!ivch_read(dvo, VR01, &vr01)) ++ return; ++ ++ if (mode == DRM_MODE_DPMS_ON) ++ backlight = 1; ++ else ++ backlight = 0; ++ ivch_write(dvo, VR80, backlight); ++ ++ if (mode == DRM_MODE_DPMS_ON) ++ vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; ++ else ++ vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); ++ ++ ivch_write(dvo, VR01, vr01); ++ ++ /* Wait for the panel to make its state transition */ ++ for (i = 0; i < 100; i++) { ++ if (!ivch_read(dvo, VR30, &vr30)) ++ break; ++ ++ if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON)) ++ break; ++ udelay(1000); ++ } ++ /* wait some more; vch may fail to resync sometimes without this */ ++ udelay(16 * 1000); ++} ++ ++static void ivch_mode_set(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ uint16_t vr40 = 0; ++ uint16_t vr01; ++ ++ vr01 = 0; ++ vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE | ++ VR40_HORIZONTAL_INTERP_ENABLE); ++ ++ if (mode->hdisplay != adjusted_mode->hdisplay || ++ mode->vdisplay != adjusted_mode->vdisplay) { ++ uint16_t x_ratio, y_ratio; ++ ++ vr01 |= VR01_PANEL_FIT_ENABLE; ++ vr40 |= VR40_CLOCK_GATING_ENABLE; ++ x_ratio = (((mode->hdisplay - 1) << 16) / ++ (adjusted_mode->hdisplay - 1)) >> 2; ++ y_ratio = (((mode->vdisplay - 1) << 16) / ++ (adjusted_mode->vdisplay - 1)) >> 2; ++ ivch_write (dvo, VR42, x_ratio); ++ ivch_write (dvo, VR41, y_ratio); ++ } else { ++ vr01 &= ~VR01_PANEL_FIT_ENABLE; ++ vr40 &= ~VR40_CLOCK_GATING_ENABLE; ++ } ++ vr40 &= ~VR40_AUTO_RATIO_ENABLE; ++ ++ ivch_write(dvo, VR01, vr01); ++ ivch_write(dvo, VR40, vr40); ++ ++ ivch_dump_regs(dvo); ++} ++ ++static void ivch_dump_regs(struct intel_dvo_device *dvo) ++{ ++ uint16_t val; ++ ++ ivch_read(dvo, VR00, &val); ++ DRM_DEBUG("VR00: 0x%04x\n", val); ++ ivch_read(dvo, VR01, &val); ++ DRM_DEBUG("VR01: 0x%04x\n", val); ++ ivch_read(dvo, VR30, &val); ++ DRM_DEBUG("VR30: 0x%04x\n", val); ++ ivch_read(dvo, VR40, &val); ++ DRM_DEBUG("VR40: 0x%04x\n", 
val); ++ ++ /* GPIO registers */ ++ ivch_read(dvo, VR80, &val); ++ DRM_DEBUG("VR80: 0x%04x\n", val); ++ ivch_read(dvo, VR81, &val); ++ DRM_DEBUG("VR81: 0x%04x\n", val); ++ ivch_read(dvo, VR82, &val); ++ DRM_DEBUG("VR82: 0x%04x\n", val); ++ ivch_read(dvo, VR83, &val); ++ DRM_DEBUG("VR83: 0x%04x\n", val); ++ ivch_read(dvo, VR84, &val); ++ DRM_DEBUG("VR84: 0x%04x\n", val); ++ ivch_read(dvo, VR85, &val); ++ DRM_DEBUG("VR85: 0x%04x\n", val); ++ ivch_read(dvo, VR86, &val); ++ DRM_DEBUG("VR86: 0x%04x\n", val); ++ ivch_read(dvo, VR87, &val); ++ DRM_DEBUG("VR87: 0x%04x\n", val); ++ ivch_read(dvo, VR88, &val); ++ DRM_DEBUG("VR88: 0x%04x\n", val); ++ ++ /* Scratch register 0 - AIM Panel type */ ++ ivch_read(dvo, VR8E, &val); ++ DRM_DEBUG("VR8E: 0x%04x\n", val); ++ ++ /* Scratch register 1 - Status register */ ++ ivch_read(dvo, VR8F, &val); ++ DRM_DEBUG("VR8F: 0x%04x\n", val); ++} ++ ++static void ivch_save(struct intel_dvo_device *dvo) ++{ ++ struct ivch_priv *priv = dvo->dev_priv; ++ ++ ivch_read(dvo, VR01, &priv->save_VR01); ++ ivch_read(dvo, VR40, &priv->save_VR40); ++} ++ ++static void ivch_restore(struct intel_dvo_device *dvo) ++{ ++ struct ivch_priv *priv = dvo->dev_priv; ++ ++ ivch_write(dvo, VR01, priv->save_VR01); ++ ivch_write(dvo, VR40, priv->save_VR40); ++} ++ ++static void ivch_destroy(struct intel_dvo_device *dvo) ++{ ++ struct ivch_priv *priv = dvo->dev_priv; ++ ++ if (priv) { ++ kfree(priv); ++ dvo->dev_priv = NULL; ++ } ++} ++ ++struct intel_dvo_dev_ops ivch_ops= { ++ .init = ivch_init, ++ .dpms = ivch_dpms, ++ .save = ivch_save, ++ .restore = ivch_restore, ++ .mode_valid = ivch_mode_valid, ++ .mode_set = ivch_mode_set, ++ .detect = ivch_detect, ++ .dump_regs = ivch_dump_regs, ++ .destroy = ivch_destroy, ++}; +diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c +new file mode 100644 +index 0000000..033a4bb +--- /dev/null ++++ b/drivers/gpu/drm/i915/dvo_sil164.c +@@ -0,0 +1,302 @@ 
++/************************************************************************** ++ ++Copyright © 2006 Dave Airlie ++ ++All Rights Reserved. ++ ++Permission is hereby granted, free of charge, to any person obtaining a ++copy of this software and associated documentation files (the ++"Software"), to deal in the Software without restriction, including ++without limitation the rights to use, copy, modify, merge, publish, ++distribute, sub license, and/or sell copies of the Software, and to ++permit persons to whom the Software is furnished to do so, subject to ++the following conditions: ++ ++The above copyright notice and this permission notice (including the ++next paragraph) shall be included in all copies or substantial portions ++of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ ++**************************************************************************/ ++ ++#include "dvo.h" ++ ++#define SIL164_VID 0x0001 ++#define SIL164_DID 0x0006 ++ ++#define SIL164_VID_LO 0x00 ++#define SIL164_VID_HI 0x01 ++#define SIL164_DID_LO 0x02 ++#define SIL164_DID_HI 0x03 ++#define SIL164_REV 0x04 ++#define SIL164_RSVD 0x05 ++#define SIL164_FREQ_LO 0x06 ++#define SIL164_FREQ_HI 0x07 ++ ++#define SIL164_REG8 0x08 ++#define SIL164_8_VEN (1<<5) ++#define SIL164_8_HEN (1<<4) ++#define SIL164_8_DSEL (1<<3) ++#define SIL164_8_BSEL (1<<2) ++#define SIL164_8_EDGE (1<<1) ++#define SIL164_8_PD (1<<0) ++ ++#define SIL164_REG9 0x09 ++#define SIL164_9_VLOW (1<<7) ++#define SIL164_9_MSEL_MASK (0x7<<4) ++#define SIL164_9_TSEL (1<<3) ++#define SIL164_9_RSEN (1<<2) ++#define SIL164_9_HTPLG (1<<1) ++#define SIL164_9_MDI (1<<0) ++ ++#define SIL164_REGC 0x0c ++ ++struct sil164_save_rec { ++ uint8_t reg8; ++ uint8_t reg9; ++ uint8_t regc; ++}; ++ ++struct sil164_priv { ++ //I2CDevRec d; ++ bool quiet; ++ struct sil164_save_rec save_regs; ++ struct sil164_save_rec mode_regs; ++}; ++ ++#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) ++ ++static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) ++{ ++ struct sil164_priv *sil = dvo->dev_priv; ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ u8 out_buf[2]; ++ u8 in_buf[2]; ++ ++ struct i2c_msg msgs[] = { ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = 0, ++ .len = 1, ++ .buf = out_buf, ++ }, ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = I2C_M_RD, ++ .len = 1, ++ .buf = in_buf, ++ } ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = 0; ++ ++ if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { ++ *ch = in_buf[0]; ++ return true; ++ }; ++ ++ if (!sil->quiet) { ++ DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", ++ addr, i2cbus->adapter.name, i2cbus->slave_addr); ++ } ++ return false; ++} ++ ++static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) ++{ ++ struct sil164_priv 
*sil= dvo->dev_priv; ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ uint8_t out_buf[2]; ++ struct i2c_msg msg = { ++ .addr = i2cbus->slave_addr, ++ .flags = 0, ++ .len = 2, ++ .buf = out_buf, ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = ch; ++ ++ if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) ++ return true; ++ ++ if (!sil->quiet) { ++ DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", ++ addr, i2cbus->adapter.name, i2cbus->slave_addr); ++ } ++ ++ return false; ++} ++ ++/* Silicon Image 164 driver for chip on i2c bus */ ++static bool sil164_init(struct intel_dvo_device *dvo, ++ struct intel_i2c_chan *i2cbus) ++{ ++ /* this will detect the SIL164 chip on the specified i2c bus */ ++ struct sil164_priv *sil; ++ unsigned char ch; ++ ++ sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL); ++ if (sil == NULL) ++ return false; ++ ++ dvo->i2c_bus = i2cbus; ++ dvo->i2c_bus->slave_addr = dvo->slave_addr; ++ dvo->dev_priv = sil; ++ sil->quiet = true; ++ ++ if (!sil164_readb(dvo, SIL164_VID_LO, &ch)) ++ goto out; ++ ++ if (ch != (SIL164_VID & 0xff)) { ++ DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", ++ ch, i2cbus->adapter.name, i2cbus->slave_addr); ++ goto out; ++ } ++ ++ if (!sil164_readb(dvo, SIL164_DID_LO, &ch)) ++ goto out; ++ ++ if (ch != (SIL164_DID & 0xff)) { ++ DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", ++ ch, i2cbus->adapter.name, i2cbus->slave_addr); ++ goto out; ++ } ++ sil->quiet = false; ++ ++ DRM_DEBUG("init sil164 dvo controller successfully!\n"); ++ return true; ++ ++out: ++ kfree(sil); ++ return false; ++} ++ ++static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo) ++{ ++ uint8_t reg9; ++ ++ sil164_readb(dvo, SIL164_REG9, ®9); ++ ++ if (reg9 & SIL164_9_HTPLG) ++ return connector_status_connected; ++ else ++ return connector_status_disconnected; ++} ++ ++static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode) ++{ ++ return MODE_OK; ++} 
++ ++static void sil164_mode_set(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ /* As long as the basics are set up, since we don't have clock ++ * dependencies in the mode setup, we can just leave the ++ * registers alone and everything will work fine. ++ */ ++ /* recommended programming sequence from doc */ ++ /*sil164_writeb(sil, 0x08, 0x30); ++ sil164_writeb(sil, 0x09, 0x00); ++ sil164_writeb(sil, 0x0a, 0x90); ++ sil164_writeb(sil, 0x0c, 0x89); ++ sil164_writeb(sil, 0x08, 0x31);*/ ++ /* don't do much */ ++ return; ++} ++ ++/* set the SIL164 power state */ ++static void sil164_dpms(struct intel_dvo_device *dvo, int mode) ++{ ++ int ret; ++ unsigned char ch; ++ ++ ret = sil164_readb(dvo, SIL164_REG8, &ch); ++ if (ret == false) ++ return; ++ ++ if (mode == DRM_MODE_DPMS_ON) ++ ch |= SIL164_8_PD; ++ else ++ ch &= ~SIL164_8_PD; ++ ++ sil164_writeb(dvo, SIL164_REG8, ch); ++ return; ++} ++ ++static void sil164_dump_regs(struct intel_dvo_device *dvo) ++{ ++ uint8_t val; ++ ++ sil164_readb(dvo, SIL164_FREQ_LO, &val); ++ DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val); ++ sil164_readb(dvo, SIL164_FREQ_HI, &val); ++ DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val); ++ sil164_readb(dvo, SIL164_REG8, &val); ++ DRM_DEBUG("SIL164_REG8: 0x%02x\n", val); ++ sil164_readb(dvo, SIL164_REG9, &val); ++ DRM_DEBUG("SIL164_REG9: 0x%02x\n", val); ++ sil164_readb(dvo, SIL164_REGC, &val); ++ DRM_DEBUG("SIL164_REGC: 0x%02x\n", val); ++} ++ ++static void sil164_save(struct intel_dvo_device *dvo) ++{ ++ struct sil164_priv *sil= dvo->dev_priv; ++ ++ if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8)) ++ return; ++ ++ if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9)) ++ return; ++ ++ if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc)) ++ return; ++ ++ return; ++} ++ ++static void sil164_restore(struct intel_dvo_device *dvo) ++{ ++ struct sil164_priv *sil = dvo->dev_priv; ++ ++ /* Restore it powered down initially */ 
++ sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1); ++ ++ sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9); ++ sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc); ++ sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8); ++} ++ ++static void sil164_destroy(struct intel_dvo_device *dvo) ++{ ++ struct sil164_priv *sil = dvo->dev_priv; ++ ++ if (sil) { ++ kfree(sil); ++ dvo->dev_priv = NULL; ++ } ++} ++ ++struct intel_dvo_dev_ops sil164_ops = { ++ .init = sil164_init, ++ .detect = sil164_detect, ++ .mode_valid = sil164_mode_valid, ++ .mode_set = sil164_mode_set, ++ .dpms = sil164_dpms, ++ .dump_regs = sil164_dump_regs, ++ .save = sil164_save, ++ .restore = sil164_restore, ++ .destroy = sil164_destroy, ++}; +diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c +new file mode 100644 +index 0000000..207fda8 +--- /dev/null ++++ b/drivers/gpu/drm/i915/dvo_tfp410.c +@@ -0,0 +1,335 @@ ++/* ++ * Copyright © 2007 Dave Mueller ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ * ++ * Authors: ++ * Dave Mueller ++ * ++ */ ++ ++#include "dvo.h" ++ ++/* register definitions according to the TFP410 data sheet */ ++#define TFP410_VID 0x014C ++#define TFP410_DID 0x0410 ++ ++#define TFP410_VID_LO 0x00 ++#define TFP410_VID_HI 0x01 ++#define TFP410_DID_LO 0x02 ++#define TFP410_DID_HI 0x03 ++#define TFP410_REV 0x04 ++ ++#define TFP410_CTL_1 0x08 ++#define TFP410_CTL_1_TDIS (1<<6) ++#define TFP410_CTL_1_VEN (1<<5) ++#define TFP410_CTL_1_HEN (1<<4) ++#define TFP410_CTL_1_DSEL (1<<3) ++#define TFP410_CTL_1_BSEL (1<<2) ++#define TFP410_CTL_1_EDGE (1<<1) ++#define TFP410_CTL_1_PD (1<<0) ++ ++#define TFP410_CTL_2 0x09 ++#define TFP410_CTL_2_VLOW (1<<7) ++#define TFP410_CTL_2_MSEL_MASK (0x7<<4) ++#define TFP410_CTL_2_MSEL (1<<4) ++#define TFP410_CTL_2_TSEL (1<<3) ++#define TFP410_CTL_2_RSEN (1<<2) ++#define TFP410_CTL_2_HTPLG (1<<1) ++#define TFP410_CTL_2_MDI (1<<0) ++ ++#define TFP410_CTL_3 0x0A ++#define TFP410_CTL_3_DK_MASK (0x7<<5) ++#define TFP410_CTL_3_DK (1<<5) ++#define TFP410_CTL_3_DKEN (1<<4) ++#define TFP410_CTL_3_CTL_MASK (0x7<<1) ++#define TFP410_CTL_3_CTL (1<<1) ++ ++#define TFP410_USERCFG 0x0B ++ ++#define TFP410_DE_DLY 0x32 ++ ++#define TFP410_DE_CTL 0x33 ++#define TFP410_DE_CTL_DEGEN (1<<6) ++#define TFP410_DE_CTL_VSPOL (1<<5) ++#define TFP410_DE_CTL_HSPOL (1<<4) ++#define TFP410_DE_CTL_DEDLY8 (1<<0) ++ ++#define TFP410_DE_TOP 0x34 ++ ++#define TFP410_DE_CNT_LO 0x36 ++#define TFP410_DE_CNT_HI 0x37 ++ ++#define TFP410_DE_LIN_LO 0x38 ++#define TFP410_DE_LIN_HI 0x39 ++ ++#define TFP410_H_RES_LO 0x3A ++#define TFP410_H_RES_HI 0x3B ++ ++#define TFP410_V_RES_LO 0x3C ++#define TFP410_V_RES_HI 0x3D ++ ++struct tfp410_save_rec { ++ uint8_t ctl1; ++ uint8_t ctl2; ++}; ++ 
++struct tfp410_priv { ++ bool quiet; ++ ++ struct tfp410_save_rec saved_reg; ++ struct tfp410_save_rec mode_reg; ++}; ++ ++static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) ++{ ++ struct tfp410_priv *tfp = dvo->dev_priv; ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ u8 out_buf[2]; ++ u8 in_buf[2]; ++ ++ struct i2c_msg msgs[] = { ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = 0, ++ .len = 1, ++ .buf = out_buf, ++ }, ++ { ++ .addr = i2cbus->slave_addr, ++ .flags = I2C_M_RD, ++ .len = 1, ++ .buf = in_buf, ++ } ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = 0; ++ ++ if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { ++ *ch = in_buf[0]; ++ return true; ++ }; ++ ++ if (!tfp->quiet) { ++ DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", ++ addr, i2cbus->adapter.name, i2cbus->slave_addr); ++ } ++ return false; ++} ++ ++static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) ++{ ++ struct tfp410_priv *tfp = dvo->dev_priv; ++ struct intel_i2c_chan *i2cbus = dvo->i2c_bus; ++ uint8_t out_buf[2]; ++ struct i2c_msg msg = { ++ .addr = i2cbus->slave_addr, ++ .flags = 0, ++ .len = 2, ++ .buf = out_buf, ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = ch; ++ ++ if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) ++ return true; ++ ++ if (!tfp->quiet) { ++ DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", ++ addr, i2cbus->adapter.name, i2cbus->slave_addr); ++ } ++ ++ return false; ++} ++ ++static int tfp410_getid(struct intel_dvo_device *dvo, int addr) ++{ ++ uint8_t ch1, ch2; ++ ++ if (tfp410_readb(dvo, addr+0, &ch1) && ++ tfp410_readb(dvo, addr+1, &ch2)) ++ return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF); ++ ++ return -1; ++} ++ ++/* Ti TFP410 driver for chip on i2c bus */ ++static bool tfp410_init(struct intel_dvo_device *dvo, ++ struct intel_i2c_chan *i2cbus) ++{ ++ /* this will detect the tfp410 chip on the specified i2c bus */ ++ struct tfp410_priv *tfp; ++ int id; ++ ++ tfp = kzalloc(sizeof(struct 
tfp410_priv), GFP_KERNEL); ++ if (tfp == NULL) ++ return false; ++ ++ dvo->i2c_bus = i2cbus; ++ dvo->i2c_bus->slave_addr = dvo->slave_addr; ++ dvo->dev_priv = tfp; ++ tfp->quiet = true; ++ ++ if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { ++ DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", ++ id, i2cbus->adapter.name, i2cbus->slave_addr); ++ goto out; ++ } ++ ++ if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { ++ DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n", ++ id, i2cbus->adapter.name, i2cbus->slave_addr); ++ goto out; ++ } ++ tfp->quiet = false; ++ return true; ++out: ++ kfree(tfp); ++ return false; ++} ++ ++static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) ++{ ++ enum drm_connector_status ret = connector_status_disconnected; ++ uint8_t ctl2; ++ ++ if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { ++ if (ctl2 & TFP410_CTL_2_HTPLG) ++ ret = connector_status_connected; ++ else ++ ret = connector_status_disconnected; ++ } ++ ++ return ret; ++} ++ ++static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode) ++{ ++ return MODE_OK; ++} ++ ++static void tfp410_mode_set(struct intel_dvo_device *dvo, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ /* As long as the basics are set up, since we don't have clock dependencies ++ * in the mode setup, we can just leave the registers alone and everything ++ * will work fine. 
++ */ ++ /* don't do much */ ++ return; ++} ++ ++/* set the tfp410 power state */ ++static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) ++{ ++ uint8_t ctl1; ++ ++ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) ++ return; ++ ++ if (mode == DRM_MODE_DPMS_ON) ++ ctl1 |= TFP410_CTL_1_PD; ++ else ++ ctl1 &= ~TFP410_CTL_1_PD; ++ ++ tfp410_writeb(dvo, TFP410_CTL_1, ctl1); ++} ++ ++static void tfp410_dump_regs(struct intel_dvo_device *dvo) ++{ ++ uint8_t val, val2; ++ ++ tfp410_readb(dvo, TFP410_REV, &val); ++ DRM_DEBUG("TFP410_REV: 0x%02X\n", val); ++ tfp410_readb(dvo, TFP410_CTL_1, &val); ++ DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val); ++ tfp410_readb(dvo, TFP410_CTL_2, &val); ++ DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val); ++ tfp410_readb(dvo, TFP410_CTL_3, &val); ++ DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val); ++ tfp410_readb(dvo, TFP410_USERCFG, &val); ++ DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val); ++ tfp410_readb(dvo, TFP410_DE_DLY, &val); ++ DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val); ++ tfp410_readb(dvo, TFP410_DE_CTL, &val); ++ DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val); ++ tfp410_readb(dvo, TFP410_DE_TOP, &val); ++ DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val); ++ tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); ++ tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); ++ DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); ++ tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); ++ tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); ++ DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); ++ tfp410_readb(dvo, TFP410_H_RES_LO, &val); ++ tfp410_readb(dvo, TFP410_H_RES_HI, &val2); ++ DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val); ++ tfp410_readb(dvo, TFP410_V_RES_LO, &val); ++ tfp410_readb(dvo, TFP410_V_RES_HI, &val2); ++ DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val); ++} ++ ++static void tfp410_save(struct intel_dvo_device *dvo) ++{ ++ struct tfp410_priv *tfp = dvo->dev_priv; ++ ++ if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1)) ++ return; ++ ++ if (!tfp410_readb(dvo, TFP410_CTL_2, 
&tfp->saved_reg.ctl2)) ++ return; ++} ++ ++static void tfp410_restore(struct intel_dvo_device *dvo) ++{ ++ struct tfp410_priv *tfp = dvo->dev_priv; ++ ++ /* Restore it powered down initially */ ++ tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1); ++ ++ tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2); ++ tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1); ++} ++ ++static void tfp410_destroy(struct intel_dvo_device *dvo) ++{ ++ struct tfp410_priv *tfp = dvo->dev_priv; ++ ++ if (tfp) { ++ kfree(tfp); ++ dvo->dev_priv = NULL; ++ } ++} ++ ++struct intel_dvo_dev_ops tfp410_ops = { ++ .init = tfp410_init, ++ .detect = tfp410_detect, ++ .mode_valid = tfp410_mode_valid, ++ .mode_set = tfp410_mode_set, ++ .dpms = tfp410_dpms, ++ .dump_regs = tfp410_dump_regs, ++ .save = tfp410_save, ++ .restore = tfp410_restore, ++ .destroy = tfp410_destroy, ++}; +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +index afa8a12..37a9d08 100644 +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -28,6 +28,8 @@ + + #include "drmP.h" + #include "drm.h" ++#include "drm_crtc_helper.h" ++#include "intel_drv.h" + #include "i915_drm.h" + #include "i915_drv.h" + +@@ -39,6 +41,7 @@ + int i915_wait_ring(struct drm_device * dev, int n, const char *caller) + { + drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + drm_i915_ring_buffer_t *ring = &(dev_priv->ring); + u32 acthd_reg = IS_I965G(dev) ? 
ACTHD_I965 : ACTHD; + u32 last_acthd = I915_READ(acthd_reg); +@@ -55,8 +58,8 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) + if (ring->space >= n) + return 0; + +- if (dev_priv->sarea_priv) +- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; + + if (ring->head != last_head) + i = 0; +@@ -121,16 +124,28 @@ static void i915_free_hws(struct drm_device *dev) + void i915_kernel_lost_context(struct drm_device * dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_master_private *master_priv; + drm_i915_ring_buffer_t *ring = &(dev_priv->ring); + ++ /* ++ * We should never lose context on the ring with modesetting ++ * as we don't expose it to userspace ++ */ ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return; ++ + ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; + ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->Size; + +- if (ring->head == ring->tail && dev_priv->sarea_priv) +- dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; ++ if (!dev->primary->master) ++ return; ++ ++ master_priv = dev->primary->master->driver_priv; ++ if (ring->head == ring->tail && master_priv->sarea_priv) ++ master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; + } + + static int i915_dma_cleanup(struct drm_device * dev) +@@ -154,25 +169,13 @@ static int i915_dma_cleanup(struct drm_device * dev) + if (I915_NEED_GFX_HWS(dev)) + i915_free_hws(dev); + +- dev_priv->sarea = NULL; +- dev_priv->sarea_priv = NULL; +- + return 0; + } + + static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) + { + drm_i915_private_t *dev_priv = dev->dev_private; +- +- dev_priv->sarea = drm_getsarea(dev); +- if (!dev_priv->sarea) { +- DRM_ERROR("can not find sarea!\n"); +- i915_dma_cleanup(dev); +- return -EINVAL; +- } +- +- dev_priv->sarea_priv = 
(drm_i915_sarea_t *) +- ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + + if (init->ring_size != 0) { + if (dev_priv->ring.ring_obj != NULL) { +@@ -207,7 +210,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) + dev_priv->back_offset = init->back_offset; + dev_priv->front_offset = init->front_offset; + dev_priv->current_page = 0; +- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->pf_current_page = 0; + + /* Allow hardware batchbuffers unless told otherwise. + */ +@@ -222,11 +226,6 @@ static int i915_dma_resume(struct drm_device * dev) + + DRM_DEBUG("%s\n", __func__); + +- if (!dev_priv->sarea) { +- DRM_ERROR("can not find sarea!\n"); +- return -EINVAL; +- } +- + if (dev_priv->ring.map.handle == NULL) { + DRM_ERROR("can not ioremap virtual address for" + " ring buffer\n"); +@@ -435,13 +434,14 @@ i915_emit_box(struct drm_device *dev, + static void i915_emit_breadcrumb(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + RING_LOCALS; + + dev_priv->counter++; + if (dev_priv->counter > 0x7FFFFFFFUL) + dev_priv->counter = 0; +- if (dev_priv->sarea_priv) +- dev_priv->sarea_priv->last_enqueue = dev_priv->counter; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->last_enqueue = dev_priv->counter; + + BEGIN_LP_RING(4); + OUT_RING(MI_STORE_DWORD_INDEX); +@@ -537,15 +537,17 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, + static int i915_dispatch_flip(struct drm_device * dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_master_private *master_priv = ++ dev->primary->master->driver_priv; + RING_LOCALS; + +- if (!dev_priv->sarea_priv) ++ if (!master_priv->sarea_priv) + return -EINVAL; + + DRM_DEBUG("%s: page=%d 
pfCurrentPage=%d\n", + __func__, + dev_priv->current_page, +- dev_priv->sarea_priv->pf_current_page); ++ master_priv->sarea_priv->pf_current_page); + + i915_kernel_lost_context(dev); + +@@ -572,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev) + OUT_RING(0); + ADVANCE_LP_RING(); + +- dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; ++ master_priv->sarea_priv->last_enqueue = dev_priv->counter++; + + BEGIN_LP_RING(4); + OUT_RING(MI_STORE_DWORD_INDEX); +@@ -581,7 +583,7 @@ static int i915_dispatch_flip(struct drm_device * dev) + OUT_RING(0); + ADVANCE_LP_RING(); + +- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; ++ master_priv->sarea_priv->pf_current_page = dev_priv->current_page; + return 0; + } + +@@ -611,8 +613,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) +- dev_priv->sarea_priv; ++ master_priv->sarea_priv; + drm_i915_batchbuffer_t *batch = data; + int ret; + +@@ -644,8 +647,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) +- dev_priv->sarea_priv; ++ master_priv->sarea_priv; + drm_i915_cmdbuffer_t *cmdbuf = data; + int ret; + +@@ -774,6 +778,11 @@ static int i915_set_status_page(struct drm_device *dev, void *data, + return -EINVAL; + } + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ WARN(1, "tried to set status page when mode setting active\n"); ++ return 0; ++ } ++ + printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); + + dev_priv->status_gfx_addr = hws->addr & 
(0x1ffff<<12); +@@ -802,6 +811,220 @@ static int i915_set_status_page(struct drm_device *dev, void *data, + return 0; + } + ++/** ++ * i915_probe_agp - get AGP bootup configuration ++ * @pdev: PCI device ++ * @aperture_size: returns AGP aperture configured size ++ * @preallocated_size: returns size of BIOS preallocated AGP space ++ * ++ * Since Intel integrated graphics are UMA, the BIOS has to set aside ++ * some RAM for the framebuffer at early boot. This code figures out ++ * how much was set aside so we can use it for our own purposes. ++ */ ++int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, ++ unsigned long *preallocated_size) ++{ ++ struct pci_dev *bridge_dev; ++ u16 tmp = 0; ++ unsigned long overhead; ++ ++ bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); ++ if (!bridge_dev) { ++ DRM_ERROR("bridge device not found\n"); ++ return -1; ++ } ++ ++ /* Get the fb aperture size and "stolen" memory amount. */ ++ pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp); ++ pci_dev_put(bridge_dev); ++ ++ *aperture_size = 1024 * 1024; ++ *preallocated_size = 1024 * 1024; ++ ++ switch (dev->pdev->device) { ++ case PCI_DEVICE_ID_INTEL_82830_CGC: ++ case PCI_DEVICE_ID_INTEL_82845G_IG: ++ case PCI_DEVICE_ID_INTEL_82855GM_IG: ++ case PCI_DEVICE_ID_INTEL_82865_IG: ++ if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) ++ *aperture_size *= 64; ++ else ++ *aperture_size *= 128; ++ break; ++ default: ++ /* 9xx supports large sizes, just look at the length */ ++ *aperture_size = pci_resource_len(dev->pdev, 2); ++ break; ++ } ++ ++ /* ++ * Some of the preallocated space is taken by the GTT ++ * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 
++ */ ++ if (IS_G4X(dev)) ++ overhead = 4096; ++ else ++ overhead = (*aperture_size / 1024) + 4096; ++ ++ switch (tmp & INTEL_855_GMCH_GMS_MASK) { ++ case INTEL_855_GMCH_GMS_STOLEN_1M: ++ break; /* 1M already */ ++ case INTEL_855_GMCH_GMS_STOLEN_4M: ++ *preallocated_size *= 4; ++ break; ++ case INTEL_855_GMCH_GMS_STOLEN_8M: ++ *preallocated_size *= 8; ++ break; ++ case INTEL_855_GMCH_GMS_STOLEN_16M: ++ *preallocated_size *= 16; ++ break; ++ case INTEL_855_GMCH_GMS_STOLEN_32M: ++ *preallocated_size *= 32; ++ break; ++ case INTEL_915G_GMCH_GMS_STOLEN_48M: ++ *preallocated_size *= 48; ++ break; ++ case INTEL_915G_GMCH_GMS_STOLEN_64M: ++ *preallocated_size *= 64; ++ break; ++ case INTEL_855_GMCH_GMS_DISABLED: ++ DRM_ERROR("video memory is disabled\n"); ++ return -1; ++ default: ++ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", ++ tmp & INTEL_855_GMCH_GMS_MASK); ++ return -1; ++ } ++ *preallocated_size -= overhead; ++ ++ return 0; ++} ++ ++static int i915_load_modeset_init(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ unsigned long agp_size, prealloc_size; ++ int fb_bar = IS_I9XX(dev) ? 
2 : 0; ++ int ret = 0; ++ ++ dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & ++ 0xff000000; ++ ++ DRM_DEBUG("*** fb base 0x%08lx\n", dev->mode_config.fb_base); ++ ++ if (IS_MOBILE(dev) || (IS_I9XX(dev) && !IS_I965G(dev) && !IS_G33(dev))) ++ dev_priv->cursor_needs_physical = true; ++ else ++ dev_priv->cursor_needs_physical = false; ++ ++ i915_probe_agp(dev, &agp_size, &prealloc_size); ++ ++ /* Basic memrange allocator for stolen space (aka vram) */ ++ drm_mm_init(&dev_priv->vram, 0, prealloc_size); ++ ++ /* Let GEM Manage from end of prealloc space to end of aperture */ ++ i915_gem_do_init(dev, prealloc_size, agp_size); ++ ++ ret = i915_gem_init_ringbuffer(dev); ++ if (ret) ++ goto out; ++ ++ dev_priv->mm.gtt_mapping = ++ io_mapping_create_wc(dev->agp->base, ++ dev->agp->agp_info.aper_size * 1024*1024); ++ ++ /* Allow hardware batchbuffers unless told otherwise. ++ */ ++ dev_priv->allow_batchbuffer = 1; ++ ++ ret = intel_init_bios(dev); ++ if (ret) ++ DRM_INFO("failed to find VBIOS tables\n"); ++ ++ ret = drm_irq_install(dev); ++ if (ret) ++ goto destroy_ringbuffer; ++ ++ /* FIXME: re-add hotplug support */ ++#if 0 ++ ret = drm_hotplug_init(dev); ++ if (ret) ++ goto destroy_ringbuffer; ++#endif ++ ++ /* Always safe in the mode setting case. */ ++ /* FIXME: do pre/post-mode set stuff in core KMS code */ ++ dev->vblank_disable_allowed = 1; ++ ++ /* ++ * Initialize the hardware status page IRQ location. 
++ */ ++ ++ I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); ++ ++ intel_modeset_init(dev); ++ ++ drm_helper_initial_config(dev, false); ++ ++ dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL); ++ if (!dev->devname) { ++ ret = -ENOMEM; ++ goto modeset_cleanup; ++ } ++ ++ return 0; ++ ++modeset_cleanup: ++ intel_modeset_cleanup(dev); ++destroy_ringbuffer: ++ i915_gem_cleanup_ringbuffer(dev); ++out: ++ return ret; ++} ++ ++int i915_master_create(struct drm_device *dev, struct drm_master *master) ++{ ++ struct drm_i915_master_private *master_priv; ++ ++ master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); ++ if (!master_priv) ++ return -ENOMEM; ++ ++ master->driver_priv = master_priv; ++ return 0; ++} ++ ++void i915_master_destroy(struct drm_device *dev, struct drm_master *master) ++{ ++ struct drm_i915_master_private *master_priv = master->driver_priv; ++ ++ if (!master_priv) ++ return; ++ ++ drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); ++ ++ master->driver_priv = NULL; ++} ++ ++ ++int i915_driver_firstopen(struct drm_device *dev) ++{ ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ return 0; ++} ++ ++/** ++ * i915_driver_load - setup chip and create an initial config ++ * @dev: DRM device ++ * @flags: startup flags ++ * ++ * The driver load routine has to do several things: ++ * - drive output discovery via intel_modeset_init() ++ * - initialize the memory manager ++ * - allocate initial config memory ++ * - setup the DRM framebuffer with the allocated memory ++ */ + int i915_driver_load(struct drm_device *dev, unsigned long flags) + { + struct drm_i915_private *dev_priv = dev->dev_private; +@@ -829,6 +1052,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + size = drm_get_resource_len(dev, mmio_bar); + + dev_priv->regs = ioremap(base, size); ++ if (!dev_priv->regs) { ++ DRM_ERROR("failed to map registers\n"); ++ ret = -EIO; ++ goto free_priv; ++ } ++ ++#ifdef CONFIG_HIGHMEM64G ++ /* don't enable GEM on 
PAE - needs agp + set_memory_* interface fixes */ ++ dev_priv->has_gem = 0; ++#else ++ /* enable GEM by default */ ++ dev_priv->has_gem = 1; ++#endif + + #ifdef CONFIG_HIGHMEM64G + /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ +@@ -844,7 +1080,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + if (!I915_NEED_GFX_HWS(dev)) { + ret = i915_init_phys_hws(dev); + if (ret != 0) +- return ret; ++ goto out_rmmap; + } + + /* On the 945G/GM, the chipset reports the MSI capability on the +@@ -864,6 +1100,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + intel_opregion_init(dev); + + spin_lock_init(&dev_priv->user_irq_lock); ++ dev_priv->user_irq_refcount = 0; + + ret = drm_vblank_init(dev, I915_NUM_PIPE); + +@@ -872,6 +1109,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + return ret; + } + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ ret = i915_load_modeset_init(dev); ++ if (ret < 0) { ++ DRM_ERROR("failed to init modeset\n"); ++ goto out_rmmap; ++ } ++ } ++ ++ return 0; ++ ++out_rmmap: ++ iounmap(dev_priv->regs); ++free_priv: ++ drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER); + return ret; + } + +@@ -879,16 +1130,29 @@ int i915_driver_unload(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ io_mapping_free(dev_priv->mm.gtt_mapping); ++ drm_irq_uninstall(dev); ++ } ++ + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); + +- i915_free_hws(dev); +- + if (dev_priv->regs != NULL) + iounmap(dev_priv->regs); + + intel_opregion_free(dev); + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ intel_modeset_cleanup(dev); ++ ++ mutex_lock(&dev->struct_mutex); ++ i915_gem_cleanup_ringbuffer(dev); ++ mutex_unlock(&dev->struct_mutex); ++ drm_mm_takedown(&dev_priv->vram); ++ i915_gem_lastclose(dev); ++ } ++ + drm_free(dev->dev_private, 
sizeof(drm_i915_private_t), + DRM_MEM_DRIVER); + +@@ -914,12 +1178,26 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) + return 0; + } + ++/** ++ * i915_driver_lastclose - clean up after all DRM clients have exited ++ * @dev: DRM device ++ * ++ * Take care of cleaning up after all DRM clients have exited. In the ++ * mode setting case, we want to restore the kernel's initial mode (just ++ * in case the last client left us in a bad state). ++ * ++ * Additionally, in the non-mode setting case, we'll tear down the AGP ++ * and DMA structures, since the kernel won't be using them, and clea ++ * up any GEM state. ++ */ + void i915_driver_lastclose(struct drm_device * dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; + +- if (!dev_priv) ++ if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { ++ intelfb_restore(); + return; ++ } + + i915_gem_lastclose(dev); + +@@ -932,7 +1210,8 @@ void i915_driver_lastclose(struct drm_device * dev) + void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) + { + drm_i915_private_t *dev_priv = dev->dev_private; +- i915_mem_release(dev, file_priv, dev_priv->agp_heap); ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ i915_mem_release(dev, file_priv, dev_priv->agp_heap); + } + + void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) +@@ -972,6 +1251,7 @@ struct drm_ioctl_desc i915_ioctls[] = { + DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), + DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), + DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0), + DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), + DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), + DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c +index 
a80ead2..cbee41c 100644 +--- a/drivers/gpu/drm/i915/i915_drv.c ++++ b/drivers/gpu/drm/i915/i915_drv.c +@@ -33,11 +33,22 @@ + #include "i915_drv.h" + + #include "drm_pciids.h" ++#include ++ ++unsigned int i915_modeset = -1; ++module_param_named(modeset, i915_modeset, int, 0400); ++ ++unsigned int i915_fbpercrtc = 0; ++module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); + + static struct pci_device_id pciidlist[] = { + i915_PCI_IDS + }; + ++#if defined(CONFIG_DRM_I915_KMS) ++MODULE_DEVICE_TABLE(pci, pciidlist); ++#endif ++ + static int i915_suspend(struct drm_device *dev, pm_message_t state) + { + struct drm_i915_private *dev_priv = dev->dev_private; +@@ -81,6 +92,10 @@ static int i915_resume(struct drm_device *dev) + return 0; + } + ++static struct vm_operations_struct i915_gem_vm_ops = { ++ .fault = i915_gem_fault, ++}; ++ + static struct drm_driver driver = { + /* don't use mtrr's here, the Xserver or user space app should + * deal with them for intel hardware. +@@ -107,17 +122,20 @@ static struct drm_driver driver = { + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, ++ .master_create = i915_master_create, ++ .master_destroy = i915_master_destroy, + .proc_init = i915_gem_proc_init, + .proc_cleanup = i915_gem_proc_cleanup, + .gem_init_object = i915_gem_init_object, + .gem_free_object = i915_gem_free_object, ++ .gem_vm_ops = &i915_gem_vm_ops, + .ioctls = i915_ioctls, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, +- .mmap = drm_mmap, ++ .mmap = drm_gem_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + #ifdef CONFIG_COMPAT +@@ -141,6 +159,28 @@ static struct drm_driver driver = { + static int __init i915_init(void) + { + driver.num_ioctls = i915_max_ioctl; ++ ++ /* ++ * If CONFIG_DRM_I915_KMS is set, default to KMS unless ++ * explicitly disabled with the module pararmeter. 
++ * ++ * Otherwise, just follow the parameter (defaulting to off). ++ * ++ * Allow optional vga_text_mode_force boot option to override ++ * the default behavior. ++ */ ++#if defined(CONFIG_DRM_I915_KMS) ++ if (i915_modeset != 0) ++ driver.driver_features |= DRIVER_MODESET; ++#endif ++ if (i915_modeset == 1) ++ driver.driver_features |= DRIVER_MODESET; ++ ++#ifdef CONFIG_VGA_CONSOLE ++ if (vgacon_text_force() && i915_modeset == -1) ++ driver.driver_features &= ~DRIVER_MODESET; ++#endif ++ + return drm_init(&driver); + } + +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index b3cc473..cc26605 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -31,6 +31,7 @@ + #define _I915_DRV_H_ + + #include "i915_reg.h" ++#include "intel_bios.h" + #include + + /* General customization: +@@ -103,15 +104,23 @@ struct intel_opregion { + int enabled; + }; + ++struct drm_i915_master_private { ++ drm_local_map_t *sarea; ++ struct _drm_i915_sarea *sarea_priv; ++}; ++#define I915_FENCE_REG_NONE -1 ++ ++struct drm_i915_fence_reg { ++ struct drm_gem_object *obj; ++}; ++ + typedef struct drm_i915_private { + struct drm_device *dev; + + int has_gem; + + void __iomem *regs; +- drm_local_map_t *sarea; + +- drm_i915_sarea_t *sarea_priv; + drm_i915_ring_buffer_t ring; + + drm_dma_handle_t *status_page_dmah; +@@ -144,8 +153,30 @@ typedef struct drm_i915_private { + unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; + int vblank_pipe; + ++ bool cursor_needs_physical; ++ ++ struct drm_mm vram; ++ ++ int irq_enabled; ++ + struct intel_opregion opregion; + ++ /* LVDS info */ ++ int backlight_duty_cycle; /* restore backlight to this value */ ++ bool panel_wants_dither; ++ struct drm_display_mode *panel_fixed_mode; ++ struct drm_display_mode *vbt_mode; /* if any */ ++ ++ /* Feature bits from the VBIOS */ ++ int int_tv_support:1; ++ int lvds_dither:1; ++ int lvds_vbt:1; ++ int int_crt_support:1; ++ ++ struct drm_i915_fence_reg 
fence_regs[16]; /* assume 965 */ ++ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ ++ int num_fence_regs; /* 8 on pre-965, 16 otherwise */ ++ + /* Register state */ + u8 saveLBB; + u32 saveDSPACNTR; +@@ -364,6 +395,21 @@ struct drm_i915_gem_object { + * This is the same as gtt_space->start + */ + uint32_t gtt_offset; ++ /** ++ * Required alignment for the object ++ */ ++ uint32_t gtt_alignment; ++ /** ++ * Fake offset for use by mmap(2) ++ */ ++ uint64_t mmap_offset; ++ ++ /** ++ * Fence register bits (if any) for this object. Will be set ++ * as needed when mapped into the GTT. ++ * Protected by dev->struct_mutex. ++ */ ++ int fence_reg; + + /** Boolean whether this object has a valid gtt offset. */ + int gtt_bound; +@@ -376,6 +422,7 @@ struct drm_i915_gem_object { + + /** Current tiling mode for the object. */ + uint32_t tiling_mode; ++ uint32_t stride; + + /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ + uint32_t agp_type; +@@ -385,6 +432,10 @@ struct drm_i915_gem_object { + * flags which individual pages are valid. 
+ */ + uint8_t *page_cpu_valid; ++ ++ /** User space pin count and filp owning the pin */ ++ uint32_t user_pin_count; ++ struct drm_file *pin_filp; + }; + + /** +@@ -414,8 +465,19 @@ struct drm_i915_file_private { + } mm; + }; + ++enum intel_chip_family { ++ CHIP_I8XX = 0x01, ++ CHIP_I9XX = 0x02, ++ CHIP_I915 = 0x04, ++ CHIP_I965 = 0x08, ++}; ++ + extern struct drm_ioctl_desc i915_ioctls[]; + extern int i915_max_ioctl; ++extern unsigned int i915_fbpercrtc; ++ ++extern int i915_master_create(struct drm_device *dev, struct drm_master *master); ++extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); + + /* i915_dma.c */ + extern void i915_kernel_lost_context(struct drm_device * dev); +@@ -441,6 +503,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data, + struct drm_file *file_priv); + void i915_user_irq_get(struct drm_device *dev); + void i915_user_irq_put(struct drm_device *dev); ++extern void i915_enable_interrupt (struct drm_device *dev); + + extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); + extern void i915_driver_irq_preinstall(struct drm_device * dev); +@@ -487,6 +550,8 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); ++int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); + int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, +@@ -523,6 +588,16 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev); + void i915_gem_retire_requests(struct drm_device *dev); + void i915_gem_retire_work_handler(struct work_struct *work); + void i915_gem_clflush_object(struct drm_gem_object *obj); ++int i915_gem_object_set_domain(struct drm_gem_object *obj, ++ uint32_t read_domains, ++ uint32_t write_domain); ++int 
i915_gem_init_ringbuffer(struct drm_device *dev); ++void i915_gem_cleanup_ringbuffer(struct drm_device *dev); ++int i915_gem_do_init(struct drm_device *dev, unsigned long start, ++ unsigned long end); ++int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); ++int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, ++ int write); + + /* i915_gem_tiling.c */ + void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); +@@ -561,6 +636,10 @@ static inline void opregion_asle_intr(struct drm_device *dev) { return; } + static inline void opregion_enable_asle(struct drm_device *dev) { return; } + #endif + ++/* modesetting */ ++extern void intel_modeset_init(struct drm_device *dev); ++extern void intel_modeset_cleanup(struct drm_device *dev); ++ + /** + * Lock test for when it's just for synchronization of ring access. + * +@@ -578,6 +657,13 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; } + #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) + #define I915_READ8(reg) readb(dev_priv->regs + (reg)) + #define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) ++#ifdef writeq ++#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) ++#else ++#define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \ ++ writel(upper_32_bits(val), dev_priv->regs + \ ++ (reg) + 4)) ++#endif + + #define I915_VERBOSE 0 + +@@ -660,7 +746,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); + + #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ + (dev)->pci_device == 0x2E12 || \ +- (dev)->pci_device == 0x2E22) ++ (dev)->pci_device == 0x2E22 || \ ++ IS_GM45(dev)) + + #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ + (dev)->pci_device == 0x29B2 || \ +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 24fe8c1..c4ccaf3 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -30,6 +30,7 @@ + 
#include "i915_drm.h" + #include "i915_drv.h" + #include ++#include + + #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) + +@@ -40,8 +41,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, + static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); + static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); + static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); +-static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, +- int write); + static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, + int write); + static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, +@@ -51,34 +50,43 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o + static int i915_gem_object_get_page_list(struct drm_gem_object *obj); + static void i915_gem_object_free_page_list(struct drm_gem_object *obj); + static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); ++static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ++ unsigned alignment); ++static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj); ++static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); ++static int i915_gem_evict_something(struct drm_device *dev); ++ ++int i915_gem_do_init(struct drm_device *dev, unsigned long start, ++ unsigned long end) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; + +-static void +-i915_gem_cleanup_ringbuffer(struct drm_device *dev); ++ if (start >= end || ++ (start & (PAGE_SIZE - 1)) != 0 || ++ (end & (PAGE_SIZE - 1)) != 0) { ++ return -EINVAL; ++ } ++ ++ drm_mm_init(&dev_priv->mm.gtt_space, start, ++ end - start); ++ ++ dev->gtt_total = (uint32_t) (end - start); ++ ++ return 0; ++} + + int + i915_gem_init_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { +- drm_i915_private_t *dev_priv = dev->dev_private; + struct 
drm_i915_gem_init *args = data; ++ int ret; + + mutex_lock(&dev->struct_mutex); +- +- if (args->gtt_start >= args->gtt_end || +- (args->gtt_start & (PAGE_SIZE - 1)) != 0 || +- (args->gtt_end & (PAGE_SIZE - 1)) != 0) { +- mutex_unlock(&dev->struct_mutex); +- return -EINVAL; +- } +- +- drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start, +- args->gtt_end - args->gtt_start); +- +- dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start); +- ++ ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); + mutex_unlock(&dev->struct_mutex); + +- return 0; ++ return ret; + } + + int +@@ -529,6 +537,252 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + return 0; + } + ++/** ++ * i915_gem_fault - fault a page into the GTT ++ * vma: VMA in question ++ * vmf: fault info ++ * ++ * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped ++ * from userspace. The fault handler takes care of binding the object to ++ * the GTT (if needed), allocating and programming a fence register (again, ++ * only if needed based on whether the old reg is still valid or the object ++ * is tiled) and inserting a new PTE into the faulting process. ++ * ++ * Note that the faulting process may involve evicting existing objects ++ * from the GTT and/or fence registers to make room. So performance may ++ * suffer if the GTT working set is large or there are few fence registers ++ * left. 
++ */ ++int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ struct drm_gem_object *obj = vma->vm_private_data; ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ pgoff_t page_offset; ++ unsigned long pfn; ++ int ret = 0; ++ ++ /* We don't use vmf->pgoff since that has the fake offset */ ++ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> ++ PAGE_SHIFT; ++ ++ /* Now bind it into the GTT if needed */ ++ mutex_lock(&dev->struct_mutex); ++ if (!obj_priv->gtt_space) { ++ ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); ++ if (ret) { ++ mutex_unlock(&dev->struct_mutex); ++ return VM_FAULT_SIGBUS; ++ } ++ list_add(&obj_priv->list, &dev_priv->mm.inactive_list); ++ } ++ ++ /* Need a new fence register? */ ++ if (obj_priv->fence_reg == I915_FENCE_REG_NONE && ++ obj_priv->tiling_mode != I915_TILING_NONE) ++ i915_gem_object_get_fence_reg(obj); ++ ++ pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + ++ page_offset; ++ ++ /* Finally, remap it using the new GTT offset */ ++ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ switch (ret) { ++ case -ENOMEM: ++ case -EAGAIN: ++ return VM_FAULT_OOM; ++ case -EFAULT: ++ case -EBUSY: ++ DRM_ERROR("can't insert pfn?? fault or busy...\n"); ++ return VM_FAULT_SIGBUS; ++ default: ++ return VM_FAULT_NOPAGE; ++ } ++} ++ ++/** ++ * i915_gem_create_mmap_offset - create a fake mmap offset for an object ++ * @obj: obj in question ++ * ++ * GEM memory mapping works by handing back to userspace a fake mmap offset ++ * it can use in a subsequent mmap(2) call. The DRM core code then looks ++ * up the object based on the offset and sets up the various memory mapping ++ * structures. ++ * ++ * This routine allocates and attaches a fake offset for @obj. 
++ */ ++static int ++i915_gem_create_mmap_offset(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_gem_mm *mm = dev->mm_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_map_list *list; ++ struct drm_map *map; ++ int ret = 0; ++ ++ /* Set the object up for mmap'ing */ ++ list = &obj->map_list; ++ list->map = drm_calloc(1, sizeof(struct drm_map_list), ++ DRM_MEM_DRIVER); ++ if (!list->map) ++ return -ENOMEM; ++ ++ map = list->map; ++ map->type = _DRM_GEM; ++ map->size = obj->size; ++ map->handle = obj; ++ ++ /* Get a DRM GEM mmap offset allocated... */ ++ list->file_offset_node = drm_mm_search_free(&mm->offset_manager, ++ obj->size / PAGE_SIZE, 0, 0); ++ if (!list->file_offset_node) { ++ DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); ++ ret = -ENOMEM; ++ goto out_free_list; ++ } ++ ++ list->file_offset_node = drm_mm_get_block(list->file_offset_node, ++ obj->size / PAGE_SIZE, 0); ++ if (!list->file_offset_node) { ++ ret = -ENOMEM; ++ goto out_free_list; ++ } ++ ++ list->hash.key = list->file_offset_node->start; ++ if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { ++ DRM_ERROR("failed to add to map hash\n"); ++ goto out_free_mm; ++ } ++ ++ /* By now we should be all set, any drm_mmap request on the offset ++ * below will get to our mmap & fault handler */ ++ obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT; ++ ++ return 0; ++ ++out_free_mm: ++ drm_mm_put_block(list->file_offset_node); ++out_free_list: ++ drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER); ++ ++ return ret; ++} ++ ++/** ++ * i915_gem_get_gtt_alignment - return required GTT alignment for an object ++ * @obj: object to check ++ * ++ * Return the required GTT alignment for an object, taking into account ++ * potential fence register mapping if needed. 
++ */ ++static uint32_t ++i915_gem_get_gtt_alignment(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int start, i; ++ ++ /* ++ * Minimum alignment is 4k (GTT page size), but might be greater ++ * if a fence register is needed for the object. ++ */ ++ if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE) ++ return 4096; ++ ++ /* ++ * Previous chips need to be aligned to the size of the smallest ++ * fence register that can contain the object. ++ */ ++ if (IS_I9XX(dev)) ++ start = 1024*1024; ++ else ++ start = 512*1024; ++ ++ for (i = start; i < obj->size; i <<= 1) ++ ; ++ ++ return i; ++} ++ ++/** ++ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing ++ * @dev: DRM device ++ * @data: GTT mapping ioctl data ++ * @file_priv: GEM object info ++ * ++ * Simply returns the fake offset to userspace so it can mmap it. ++ * The mmap call will end up in drm_gem_mmap(), which will set things ++ * up so we can get faults in the handler above. ++ * ++ * The fault handler will take care of binding the object into the GTT ++ * (since it may have been evicted to make room for something), allocating ++ * a fence register, and mapping the appropriate aperture address into ++ * userspace. 
++ */ ++int ++i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_mmap_gtt *args = data; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EBADF; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ obj_priv = obj->driver_private; ++ ++ if (!obj_priv->mmap_offset) { ++ ret = i915_gem_create_mmap_offset(obj); ++ if (ret) ++ return ret; ++ } ++ ++ args->offset = obj_priv->mmap_offset; ++ ++ obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj); ++ ++ /* Make sure the alignment is correct for fence regs etc */ ++ if (obj_priv->agp_mem && ++ (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) { ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ /* ++ * Pull it into the GTT so that we have a page list (makes the ++ * initial fault faster and any subsequent flushing possible). 
++ */ ++ if (!obj_priv->agp_mem) { ++ ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); ++ if (ret) { ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ list_add(&obj_priv->list, &dev_priv->mm.inactive_list); ++ } ++ ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ + static void + i915_gem_object_free_page_list(struct drm_gem_object *obj) + { +@@ -726,6 +980,7 @@ i915_gem_retire_request(struct drm_device *dev, + */ + if (obj_priv->last_rendering_seqno != request->seqno) + return; ++ + #if WATCH_LRU + DRM_INFO("%s: retire %d moves to inactive list %p\n", + __func__, request->seqno, obj); +@@ -956,6 +1211,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ loff_t offset; + int ret = 0; + + #if WATCH_BUF +@@ -991,6 +1247,14 @@ i915_gem_object_unbind(struct drm_gem_object *obj) + + BUG_ON(obj_priv->active); + ++ /* blow away mappings if mapped through GTT */ ++ offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT; ++ if (dev->dev_mapping) ++ unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1); ++ ++ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) ++ i915_gem_clear_fence_reg(obj); ++ + i915_gem_object_free_page_list(obj); + + if (obj_priv->gtt_space) { +@@ -1149,6 +1413,204 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) + return 0; + } + ++static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) ++{ ++ struct drm_gem_object *obj = reg->obj; ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int regnum = obj_priv->fence_reg; ++ uint64_t val; ++ ++ val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & ++ 0xfffff000) << 32; ++ val |= obj_priv->gtt_offset & 0xfffff000; ++ val |= ((obj_priv->stride / 128) - 1) << 
I965_FENCE_PITCH_SHIFT; ++ if (obj_priv->tiling_mode == I915_TILING_Y) ++ val |= 1 << I965_FENCE_TILING_Y_SHIFT; ++ val |= I965_FENCE_REG_VALID; ++ ++ I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); ++} ++ ++static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) ++{ ++ struct drm_gem_object *obj = reg->obj; ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int regnum = obj_priv->fence_reg; ++ uint32_t val; ++ uint32_t pitch_val; ++ ++ if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || ++ (obj_priv->gtt_offset & (obj->size - 1))) { ++ WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__); ++ return; ++ } ++ ++ if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) || ++ IS_I945GM(dev) || ++ IS_G33(dev))) ++ pitch_val = (obj_priv->stride / 128) - 1; ++ else ++ pitch_val = (obj_priv->stride / 512) - 1; ++ ++ val = obj_priv->gtt_offset; ++ if (obj_priv->tiling_mode == I915_TILING_Y) ++ val |= 1 << I830_FENCE_TILING_Y_SHIFT; ++ val |= I915_FENCE_SIZE_BITS(obj->size); ++ val |= pitch_val << I830_FENCE_PITCH_SHIFT; ++ val |= I830_FENCE_REG_VALID; ++ ++ I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); ++} ++ ++static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) ++{ ++ struct drm_gem_object *obj = reg->obj; ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int regnum = obj_priv->fence_reg; ++ uint32_t val; ++ uint32_t pitch_val; ++ ++ if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || ++ (obj_priv->gtt_offset & (obj->size - 1))) { ++ WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__); ++ return; ++ } ++ ++ pitch_val = (obj_priv->stride / 128) - 1; ++ ++ val = obj_priv->gtt_offset; ++ if (obj_priv->tiling_mode == I915_TILING_Y) ++ val |= 1 << I830_FENCE_TILING_Y_SHIFT; ++ val |= I830_FENCE_SIZE_BITS(obj->size); 
++ val |= pitch_val << I830_FENCE_PITCH_SHIFT; ++ val |= I830_FENCE_REG_VALID; ++ ++ I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); ++ ++} ++ ++/** ++ * i915_gem_object_get_fence_reg - set up a fence reg for an object ++ * @obj: object to map through a fence reg ++ * ++ * When mapping objects through the GTT, userspace wants to be able to write ++ * to them without having to worry about swizzling if the object is tiled. ++ * ++ * This function walks the fence regs looking for a free one for @obj, ++ * stealing one if it can't find any. ++ * ++ * It then sets up the reg based on the object's properties: address, pitch ++ * and tiling format. ++ */ ++static void ++i915_gem_object_get_fence_reg(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_fence_reg *reg = NULL; ++ int i, ret; ++ ++ switch (obj_priv->tiling_mode) { ++ case I915_TILING_NONE: ++ WARN(1, "allocating a fence for non-tiled object?\n"); ++ break; ++ case I915_TILING_X: ++ WARN(obj_priv->stride & (512 - 1), ++ "object is X tiled but has non-512B pitch\n"); ++ break; ++ case I915_TILING_Y: ++ WARN(obj_priv->stride & (128 - 1), ++ "object is Y tiled but has non-128B pitch\n"); ++ break; ++ } ++ ++ /* First try to find a free reg */ ++ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { ++ reg = &dev_priv->fence_regs[i]; ++ if (!reg->obj) ++ break; ++ } ++ ++ /* None available, try to steal one or wait for a user to finish */ ++ if (i == dev_priv->num_fence_regs) { ++ struct drm_i915_gem_object *old_obj_priv = NULL; ++ loff_t offset; ++ ++try_again: ++ /* Could try to use LRU here instead... 
*/ ++ for (i = dev_priv->fence_reg_start; ++ i < dev_priv->num_fence_regs; i++) { ++ reg = &dev_priv->fence_regs[i]; ++ old_obj_priv = reg->obj->driver_private; ++ if (!old_obj_priv->pin_count) ++ break; ++ } ++ ++ /* ++ * Now things get ugly... we have to wait for one of the ++ * objects to finish before trying again. ++ */ ++ if (i == dev_priv->num_fence_regs) { ++ ret = i915_gem_object_wait_rendering(reg->obj); ++ if (ret) { ++ WARN(ret, "wait_rendering failed: %d\n", ret); ++ return; ++ } ++ goto try_again; ++ } ++ ++ /* ++ * Zap this virtual mapping so we can set up a fence again ++ * for this object next time we need it. ++ */ ++ offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT; ++ if (dev->dev_mapping) ++ unmap_mapping_range(dev->dev_mapping, offset, ++ reg->obj->size, 1); ++ old_obj_priv->fence_reg = I915_FENCE_REG_NONE; ++ } ++ ++ obj_priv->fence_reg = i; ++ reg->obj = obj; ++ ++ if (IS_I965G(dev)) ++ i965_write_fence_reg(reg); ++ else if (IS_I9XX(dev)) ++ i915_write_fence_reg(reg); ++ else ++ i830_write_fence_reg(reg); ++} ++ ++/** ++ * i915_gem_clear_fence_reg - clear out fence register info ++ * @obj: object to clear ++ * ++ * Zeroes out the fence register itself and clears out the associated ++ * data structures in dev_priv and obj_priv. ++ */ ++static void ++i915_gem_clear_fence_reg(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ if (IS_I965G(dev)) ++ I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); ++ else ++ I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0); ++ ++ dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; ++ obj_priv->fence_reg = I915_FENCE_REG_NONE; ++} ++ + /** + * Finds free space in the GTT aperture and binds the object there. 
+ */ +@@ -1307,7 +1769,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) + * This function returns when the move is complete, including waiting on + * flushes to occur. + */ +-static int ++int + i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) + { + struct drm_i915_gem_object *obj_priv = obj->driver_private; +@@ -2249,11 +2711,22 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, + } + obj_priv = obj->driver_private; + +- ret = i915_gem_object_pin(obj, args->alignment); +- if (ret != 0) { +- drm_gem_object_unreference(obj); ++ if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { ++ DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", ++ args->handle); + mutex_unlock(&dev->struct_mutex); +- return ret; ++ return -EINVAL; ++ } ++ ++ obj_priv->user_pin_count++; ++ obj_priv->pin_filp = file_priv; ++ if (obj_priv->user_pin_count == 1) { ++ ret = i915_gem_object_pin(obj, args->alignment); ++ if (ret != 0) { ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } + } + + /* XXX - flush the CPU caches for pinned objects +@@ -2273,6 +2746,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, + { + struct drm_i915_gem_pin *args = data; + struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; + + mutex_lock(&dev->struct_mutex); + +@@ -2284,7 +2758,19 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, + return -EBADF; + } + +- i915_gem_object_unpin(obj); ++ obj_priv = obj->driver_private; ++ if (obj_priv->pin_filp != file_priv) { ++ DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", ++ args->handle); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ obj_priv->user_pin_count--; ++ if (obj_priv->user_pin_count == 0) { ++ obj_priv->pin_filp = NULL; ++ i915_gem_object_unpin(obj); ++ } + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); +@@ -2351,12 +2837,18 
@@ int i915_gem_init_object(struct drm_gem_object *obj) + + obj->driver_private = obj_priv; + obj_priv->obj = obj; ++ obj_priv->fence_reg = I915_FENCE_REG_NONE; + INIT_LIST_HEAD(&obj_priv->list); ++ + return 0; + } + + void i915_gem_free_object(struct drm_gem_object *obj) + { ++ struct drm_device *dev = obj->dev; ++ struct drm_gem_mm *mm = dev->mm_private; ++ struct drm_map_list *list; ++ struct drm_map *map; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + while (obj_priv->pin_count > 0) +@@ -2364,6 +2856,20 @@ void i915_gem_free_object(struct drm_gem_object *obj) + + i915_gem_object_unbind(obj); + ++ list = &obj->map_list; ++ drm_ht_remove_item(&mm->offset_hash, &list->hash); ++ ++ if (list->file_offset_node) { ++ drm_mm_put_block(list->file_offset_node); ++ list->file_offset_node = NULL; ++ } ++ ++ map = list->map; ++ if (map) { ++ drm_free(map, sizeof(*map), DRM_MEM_DRIVER); ++ list->map = NULL; ++ } ++ + drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); + drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); + } +@@ -2432,8 +2938,7 @@ i915_gem_idle(struct drm_device *dev) + */ + i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), + ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); +- seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | +- I915_GEM_DOMAIN_GTT)); ++ seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU); + + if (seqno == 0) { + mutex_unlock(&dev->struct_mutex); +@@ -2560,12 +3065,13 @@ i915_gem_init_hws(struct drm_device *dev) + return 0; + } + +-static int ++int + i915_gem_init_ringbuffer(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; ++ drm_i915_ring_buffer_t *ring = &dev_priv->ring; + int ret; + u32 head; + +@@ -2587,24 +3093,24 @@ i915_gem_init_ringbuffer(struct drm_device *dev) + } + + /* Set up the kernel mapping for the ring. 
*/ +- dev_priv->ring.Size = obj->size; +- dev_priv->ring.tail_mask = obj->size - 1; ++ ring->Size = obj->size; ++ ring->tail_mask = obj->size - 1; + +- dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; +- dev_priv->ring.map.size = obj->size; +- dev_priv->ring.map.type = 0; +- dev_priv->ring.map.flags = 0; +- dev_priv->ring.map.mtrr = 0; ++ ring->map.offset = dev->agp->base + obj_priv->gtt_offset; ++ ring->map.size = obj->size; ++ ring->map.type = 0; ++ ring->map.flags = 0; ++ ring->map.mtrr = 0; + +- drm_core_ioremap_wc(&dev_priv->ring.map, dev); +- if (dev_priv->ring.map.handle == NULL) { ++ drm_core_ioremap_wc(&ring->map, dev); ++ if (ring->map.handle == NULL) { + DRM_ERROR("Failed to map ringbuffer.\n"); + memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); + drm_gem_object_unreference(obj); + return -EINVAL; + } +- dev_priv->ring.ring_obj = obj; +- dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ++ ring->ring_obj = obj; ++ ring->virtual_start = ring->map.handle; + + /* Stop the ring if it's running. 
*/ + I915_WRITE(PRB0_CTL, 0); +@@ -2652,12 +3158,20 @@ i915_gem_init_ringbuffer(struct drm_device *dev) + } + + /* Update our cache of the ring state */ +- i915_kernel_lost_context(dev); ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ i915_kernel_lost_context(dev); ++ else { ++ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->Size; ++ } + + return 0; + } + +-static void ++void + i915_gem_cleanup_ringbuffer(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; +@@ -2695,6 +3209,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + if (dev_priv->mm.wedged) { + DRM_ERROR("Reenabling wedged hardware, good luck\n"); + dev_priv->mm.wedged = 0; +@@ -2728,6 +3245,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) ++ return 0; ++ + ret = i915_gem_idle(dev); + drm_irq_uninstall(dev); + +@@ -2758,5 +3278,13 @@ i915_gem_load(struct drm_device *dev) + i915_gem_retire_work_handler); + dev_priv->mm.next_gem_seqno = 1; + ++ /* Old X drivers will take 0-2 for front, back, depth buffers */ ++ dev_priv->fence_reg_start = 3; ++ ++ if (IS_I965G(dev)) ++ dev_priv->num_fence_regs = 16; ++ else ++ dev_priv->num_fence_regs = 8; ++ + i915_gem_detect_bit_6_swizzle(dev); + } +diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c +index e8d5abe..4d1b9de 100644 +--- a/drivers/gpu/drm/i915/i915_gem_proc.c ++++ b/drivers/gpu/drm/i915/i915_gem_proc.c +@@ -250,6 +250,39 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset, + return len - offset; + } + ++static int i915_hws_info(char *buf, char **start, 
off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int len = 0, i; ++ volatile u32 *hws; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ hws = (volatile u32 *)dev_priv->hw_status_page; ++ if (hws == NULL) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { ++ DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", ++ i * 4, ++ hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); ++ } ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ + static struct drm_proc_list { + /** file name */ + const char *name; +@@ -262,6 +295,7 @@ static struct drm_proc_list { + {"i915_gem_request", i915_gem_request_info}, + {"i915_gem_seqno", i915_gem_seqno_info}, + {"i915_gem_interrupt", i915_interrupt_info}, ++ {"i915_gem_hws", i915_hws_info}, + }; + + #define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) +diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c +index a8cb694..241f39b 100644 +--- a/drivers/gpu/drm/i915/i915_gem_tiling.c ++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c +@@ -208,6 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, + } + } + obj_priv->tiling_mode = args->tiling_mode; ++ obj_priv->stride = args->stride; + + mutex_unlock(&dev->struct_mutex); + +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 69b9a42..0cadafb 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -30,6 +30,7 @@ + #include "drm.h" + #include "i915_drm.h" + #include "i915_drv.h" ++#include "intel_drv.h" + + #define MAX_NOPID ((u32)~0) + +@@ -51,6 +52,15 @@ + #define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \ + I915_INTERRUPT_ENABLE_VAR) + ++#define 
I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ ++ PIPE_VBLANK_INTERRUPT_STATUS) ++ ++#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\ ++ PIPE_VBLANK_INTERRUPT_ENABLE) ++ ++#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ ++ DRM_I915_VBLANK_PIPE_B) ++ + void + i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) + { +@@ -168,6 +178,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + { + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ struct drm_i915_master_private *master_priv; + u32 iir, new_iir; + u32 pipea_stats, pipeb_stats; + u32 vblank_status; +@@ -200,6 +211,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + pipea_stats = I915_READ(PIPEASTAT); + pipeb_stats = I915_READ(PIPEBSTAT); ++ + /* + * Clear the PIPE(A|B)STAT regs before the IIR + */ +@@ -222,9 +234,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + I915_WRITE(IIR, iir); + new_iir = I915_READ(IIR); /* Flush posted writes */ + +- if (dev_priv->sarea_priv) +- dev_priv->sarea_priv->last_dispatch = +- READ_BREADCRUMB(dev_priv); ++ if (dev->primary->master) { ++ master_priv = dev->primary->master->driver_priv; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->last_dispatch = ++ READ_BREADCRUMB(dev_priv); ++ } + + if (iir & I915_USER_INTERRUPT) { + dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); +@@ -269,6 +284,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + static int i915_emit_irq(struct drm_device * dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + RING_LOCALS; + + i915_kernel_lost_context(dev); +@@ -278,8 +294,8 @@ static int i915_emit_irq(struct drm_device * dev) + dev_priv->counter++; + if (dev_priv->counter > 0x7FFFFFFFUL) + dev_priv->counter = 1; +- if (dev_priv->sarea_priv) 
+- dev_priv->sarea_priv->last_enqueue = dev_priv->counter; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->last_enqueue = dev_priv->counter; + + BEGIN_LP_RING(4); + OUT_RING(MI_STORE_DWORD_INDEX); +@@ -317,21 +333,20 @@ void i915_user_irq_put(struct drm_device *dev) + static int i915_wait_irq(struct drm_device * dev, int irq_nr) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + int ret = 0; + + DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, + READ_BREADCRUMB(dev_priv)); + + if (READ_BREADCRUMB(dev_priv) >= irq_nr) { +- if (dev_priv->sarea_priv) { +- dev_priv->sarea_priv->last_dispatch = +- READ_BREADCRUMB(dev_priv); +- } ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + return 0; + } + +- if (dev_priv->sarea_priv) +- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; + + i915_user_irq_get(dev); + DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, +@@ -343,10 +358,6 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) + READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); + } + +- if (dev_priv->sarea_priv) +- dev_priv->sarea_priv->last_dispatch = +- READ_BREADCRUMB(dev_priv); +- + return ret; + } + +@@ -427,6 +438,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) + spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + } + ++void i915_enable_interrupt (struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ opregion_enable_asle(dev); ++ dev_priv->irq_enabled = 1; ++} ++ ++ + /* Set the vblank monitor pipe + */ + int i915_vblank_pipe_set(struct drm_device *dev, void *data, +@@ -487,6 +506,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + ++ 
atomic_set(&dev_priv->irq_received, 0); ++ + I915_WRITE(HWSTAM, 0xeffe); + I915_WRITE(PIPEASTAT, 0); + I915_WRITE(PIPEBSTAT, 0); +diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c +index 6126a60..96e2719 100644 +--- a/drivers/gpu/drm/i915/i915_mem.c ++++ b/drivers/gpu/drm/i915/i915_mem.c +@@ -46,7 +46,8 @@ + static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) + { + drm_i915_private_t *dev_priv = dev->dev_private; +- drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; ++ drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv; + struct drm_tex_region *list; + unsigned shift, nr; + unsigned start; +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 9d24aae..47e6baf 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -175,9 +175,26 @@ + #define DISPLAY_PLANE_B (1<<20) + + /* +- * Instruction and interrupt control regs ++ * Fence registers + */ ++#define FENCE_REG_830_0 0x2000 ++#define I830_FENCE_START_MASK 0x07f80000 ++#define I830_FENCE_TILING_Y_SHIFT 12 ++#define I830_FENCE_SIZE_BITS(size) ((get_order(size >> 19) - 1) << 8) ++#define I830_FENCE_PITCH_SHIFT 4 ++#define I830_FENCE_REG_VALID (1<<0) ++ ++#define I915_FENCE_START_MASK 0x0ff00000 ++#define I915_FENCE_SIZE_BITS(size) ((get_order(size >> 20) - 1) << 8) + ++#define FENCE_REG_965_0 0x03000 ++#define I965_FENCE_PITCH_SHIFT 2 ++#define I965_FENCE_TILING_Y_SHIFT 1 ++#define I965_FENCE_REG_VALID (1<<0) ++ ++/* ++ * Instruction and interrupt control regs ++ */ + #define PRB0_TAIL 0x02030 + #define PRB0_HEAD 0x02034 + #define PRB0_START 0x02038 +@@ -245,6 +262,7 @@ + #define CM0_RC_OP_FLUSH_DISABLE (1<<0) + #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ + ++ + /* + * Framebuffer compression (915+ only) + */ +diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c +new file mode 
100644 +index 0000000..4ca82a0 +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_bios.c +@@ -0,0 +1,193 @@ ++/* ++ * Copyright © 2006 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ * SOFTWARE. 
++ * ++ * Authors: ++ * Eric Anholt ++ * ++ */ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++#include "intel_bios.h" ++ ++ ++static void * ++find_section(struct bdb_header *bdb, int section_id) ++{ ++ u8 *base = (u8 *)bdb; ++ int index = 0; ++ u16 total, current_size; ++ u8 current_id; ++ ++ /* skip to first section */ ++ index += bdb->header_size; ++ total = bdb->bdb_size; ++ ++ /* walk the sections looking for section_id */ ++ while (index < total) { ++ current_id = *(base + index); ++ index++; ++ current_size = *((u16 *)(base + index)); ++ index += 2; ++ if (current_id == section_id) ++ return base + index; ++ index += current_size; ++ } ++ ++ return NULL; ++} ++ ++/* Try to find panel data */ ++static void ++parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) ++{ ++ struct bdb_lvds_options *lvds_options; ++ struct bdb_lvds_lfp_data *lvds_lfp_data; ++ struct bdb_lvds_lfp_data_entry *entry; ++ struct lvds_dvo_timing *dvo_timing; ++ struct drm_display_mode *panel_fixed_mode; ++ ++ /* Defaults if we can't find VBT info */ ++ dev_priv->lvds_dither = 0; ++ dev_priv->lvds_vbt = 0; ++ ++ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); ++ if (!lvds_options) ++ return; ++ ++ dev_priv->lvds_dither = lvds_options->pixel_dither; ++ if (lvds_options->panel_type == 0xff) ++ return; ++ ++ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); ++ if (!lvds_lfp_data) ++ return; ++ ++ dev_priv->lvds_vbt = 1; ++ ++ entry = &lvds_lfp_data->data[lvds_options->panel_type]; ++ dvo_timing = &entry->dvo_timing; ++ ++ panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), ++ DRM_MEM_DRIVER); ++ ++ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | ++ dvo_timing->hactive_lo; ++ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + ++ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); ++ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + ++ dvo_timing->hsync_pulse_width; ++ 
panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + ++ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); ++ ++ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | ++ dvo_timing->vactive_lo; ++ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + ++ dvo_timing->vsync_off; ++ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + ++ dvo_timing->vsync_pulse_width; ++ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + ++ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); ++ panel_fixed_mode->clock = dvo_timing->clock * 10; ++ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; ++ ++ drm_mode_set_name(panel_fixed_mode); ++ ++ dev_priv->vbt_mode = panel_fixed_mode; ++ ++ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); ++ drm_mode_debug_printmodeline(panel_fixed_mode); ++ ++ return; ++} ++ ++static void ++parse_general_features(struct drm_i915_private *dev_priv, ++ struct bdb_header *bdb) ++{ ++ struct bdb_general_features *general; ++ ++ /* Set sensible defaults in case we can't find the general block */ ++ dev_priv->int_tv_support = 1; ++ dev_priv->int_crt_support = 1; ++ ++ general = find_section(bdb, BDB_GENERAL_FEATURES); ++ if (general) { ++ dev_priv->int_tv_support = general->int_tv_support; ++ dev_priv->int_crt_support = general->int_crt_support; ++ } ++} ++ ++/** ++ * intel_init_bios - initialize VBIOS settings & find VBT ++ * @dev: DRM device ++ * ++ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers ++ * to appropriate values. ++ * ++ * VBT existence is a sanity check that is relied on by other i830_bios.c code. ++ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may ++ * feed an updated VBT back through that, compared to what we'll fetch using ++ * this method of groping around in the BIOS data. ++ * ++ * Returns 0 on success, nonzero on failure. 
++ */ ++bool ++intel_init_bios(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct pci_dev *pdev = dev->pdev; ++ struct vbt_header *vbt = NULL; ++ struct bdb_header *bdb; ++ u8 __iomem *bios; ++ size_t size; ++ int i; ++ ++ bios = pci_map_rom(pdev, &size); ++ if (!bios) ++ return -1; ++ ++ /* Scour memory looking for the VBT signature */ ++ for (i = 0; i + 4 < size; i++) { ++ if (!memcmp(bios + i, "$VBT", 4)) { ++ vbt = (struct vbt_header *)(bios + i); ++ break; ++ } ++ } ++ ++ if (!vbt) { ++ DRM_ERROR("VBT signature missing\n"); ++ pci_unmap_rom(pdev, bios); ++ return -1; ++ } ++ ++ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); ++ ++ /* Grab useful general definitions */ ++ parse_general_features(dev_priv, bdb); ++ parse_panel_data(dev_priv, bdb); ++ ++ pci_unmap_rom(pdev, bios); ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h +new file mode 100644 +index 0000000..5ea715a +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_bios.h +@@ -0,0 +1,405 @@ ++/* ++ * Copyright © 2006 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ * SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ * ++ */ ++ ++#ifndef _I830_BIOS_H_ ++#define _I830_BIOS_H_ ++ ++#include "drmP.h" ++ ++struct vbt_header { ++ u8 signature[20]; /**< Always starts with 'VBT$' */ ++ u16 version; /**< decimal */ ++ u16 header_size; /**< in bytes */ ++ u16 vbt_size; /**< in bytes */ ++ u8 vbt_checksum; ++ u8 reserved0; ++ u32 bdb_offset; /**< from beginning of VBT */ ++ u32 aim_offset[4]; /**< from beginning of VBT */ ++} __attribute__((packed)); ++ ++struct bdb_header { ++ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */ ++ u16 version; /**< decimal */ ++ u16 header_size; /**< in bytes */ ++ u16 bdb_size; /**< in bytes */ ++}; ++ ++/* strictly speaking, this is a "skip" block, but it has interesting info */ ++struct vbios_data { ++ u8 type; /* 0 == desktop, 1 == mobile */ ++ u8 relstage; ++ u8 chipset; ++ u8 lvds_present:1; ++ u8 tv_present:1; ++ u8 rsvd2:6; /* finish byte */ ++ u8 rsvd3[4]; ++ u8 signon[155]; ++ u8 copyright[61]; ++ u16 code_segment; ++ u8 dos_boot_mode; ++ u8 bandwidth_percent; ++ u8 rsvd4; /* popup memory size */ ++ u8 resize_pci_bios; ++ u8 rsvd5; /* is crt already on ddc2 */ ++} __attribute__((packed)); ++ ++/* ++ * There are several types of BIOS data blocks (BDBs), each block has ++ * an ID and size in the first 3 bytes (ID in first, size in next 2). ++ * Known types are listed below. 
++ */ ++#define BDB_GENERAL_FEATURES 1 ++#define BDB_GENERAL_DEFINITIONS 2 ++#define BDB_OLD_TOGGLE_LIST 3 ++#define BDB_MODE_SUPPORT_LIST 4 ++#define BDB_GENERIC_MODE_TABLE 5 ++#define BDB_EXT_MMIO_REGS 6 ++#define BDB_SWF_IO 7 ++#define BDB_SWF_MMIO 8 ++#define BDB_DOT_CLOCK_TABLE 9 ++#define BDB_MODE_REMOVAL_TABLE 10 ++#define BDB_CHILD_DEVICE_TABLE 11 ++#define BDB_DRIVER_FEATURES 12 ++#define BDB_DRIVER_PERSISTENCE 13 ++#define BDB_EXT_TABLE_PTRS 14 ++#define BDB_DOT_CLOCK_OVERRIDE 15 ++#define BDB_DISPLAY_SELECT 16 ++/* 17 rsvd */ ++#define BDB_DRIVER_ROTATION 18 ++#define BDB_DISPLAY_REMOVE 19 ++#define BDB_OEM_CUSTOM 20 ++#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */ ++#define BDB_SDVO_LVDS_OPTIONS 22 ++#define BDB_SDVO_PANEL_DTDS 23 ++#define BDB_SDVO_LVDS_PNP_IDS 24 ++#define BDB_SDVO_LVDS_POWER_SEQ 25 ++#define BDB_TV_OPTIONS 26 ++#define BDB_LVDS_OPTIONS 40 ++#define BDB_LVDS_LFP_DATA_PTRS 41 ++#define BDB_LVDS_LFP_DATA 42 ++#define BDB_LVDS_BACKLIGHT 43 ++#define BDB_LVDS_POWER 44 ++#define BDB_SKIP 254 /* VBIOS private block, ignore */ ++ ++struct bdb_general_features { ++ /* bits 1 */ ++ u8 panel_fitting:2; ++ u8 flexaim:1; ++ u8 msg_enable:1; ++ u8 clear_screen:3; ++ u8 color_flip:1; ++ ++ /* bits 2 */ ++ u8 download_ext_vbt:1; ++ u8 enable_ssc:1; ++ u8 ssc_freq:1; ++ u8 enable_lfp_on_override:1; ++ u8 disable_ssc_ddt:1; ++ u8 rsvd8:3; /* finish byte */ ++ ++ /* bits 3 */ ++ u8 disable_smooth_vision:1; ++ u8 single_dvi:1; ++ u8 rsvd9:6; /* finish byte */ ++ ++ /* bits 4 */ ++ u8 legacy_monitor_detect; ++ ++ /* bits 5 */ ++ u8 int_crt_support:1; ++ u8 int_tv_support:1; ++ u8 rsvd11:6; /* finish byte */ ++} __attribute__((packed)); ++ ++struct bdb_general_definitions { ++ /* DDC GPIO */ ++ u8 crt_ddc_gmbus_pin; ++ ++ /* DPMS bits */ ++ u8 dpms_acpi:1; ++ u8 skip_boot_crt_detect:1; ++ u8 dpms_aim:1; ++ u8 rsvd1:5; /* finish byte */ ++ ++ /* boot device bits */ ++ u8 boot_display[2]; ++ u8 child_dev_size; ++ ++ /* device info */ ++ u8 
tv_or_lvds_info[33]; ++ u8 dev1[33]; ++ u8 dev2[33]; ++ u8 dev3[33]; ++ u8 dev4[33]; ++ /* may be another device block here on some platforms */ ++}; ++ ++struct bdb_lvds_options { ++ u8 panel_type; ++ u8 rsvd1; ++ /* LVDS capabilities, stored in a dword */ ++ u8 rsvd2:1; ++ u8 lvds_edid:1; ++ u8 pixel_dither:1; ++ u8 pfit_ratio_auto:1; ++ u8 pfit_gfx_mode_enhanced:1; ++ u8 pfit_text_mode_enhanced:1; ++ u8 pfit_mode:2; ++ u8 rsvd4; ++} __attribute__((packed)); ++ ++/* LFP pointer table contains entries to the struct below */ ++struct bdb_lvds_lfp_data_ptr { ++ u16 fp_timing_offset; /* offsets are from start of bdb */ ++ u8 fp_table_size; ++ u16 dvo_timing_offset; ++ u8 dvo_table_size; ++ u16 panel_pnp_id_offset; ++ u8 pnp_table_size; ++} __attribute__((packed)); ++ ++struct bdb_lvds_lfp_data_ptrs { ++ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ ++ struct bdb_lvds_lfp_data_ptr ptr[16]; ++} __attribute__((packed)); ++ ++/* LFP data has 3 blocks per entry */ ++struct lvds_fp_timing { ++ u16 x_res; ++ u16 y_res; ++ u32 lvds_reg; ++ u32 lvds_reg_val; ++ u32 pp_on_reg; ++ u32 pp_on_reg_val; ++ u32 pp_off_reg; ++ u32 pp_off_reg_val; ++ u32 pp_cycle_reg; ++ u32 pp_cycle_reg_val; ++ u32 pfit_reg; ++ u32 pfit_reg_val; ++ u16 terminator; ++} __attribute__((packed)); ++ ++struct lvds_dvo_timing { ++ u16 clock; /**< In 10khz */ ++ u8 hactive_lo; ++ u8 hblank_lo; ++ u8 hblank_hi:4; ++ u8 hactive_hi:4; ++ u8 vactive_lo; ++ u8 vblank_lo; ++ u8 vblank_hi:4; ++ u8 vactive_hi:4; ++ u8 hsync_off_lo; ++ u8 hsync_pulse_width; ++ u8 vsync_pulse_width:4; ++ u8 vsync_off:4; ++ u8 rsvd0:6; ++ u8 hsync_off_hi:2; ++ u8 h_image; ++ u8 v_image; ++ u8 max_hv; ++ u8 h_border; ++ u8 v_border; ++ u8 rsvd1:3; ++ u8 digital:2; ++ u8 vsync_positive:1; ++ u8 hsync_positive:1; ++ u8 rsvd2:1; ++} __attribute__((packed)); ++ ++struct lvds_pnp_id { ++ u16 mfg_name; ++ u16 product_code; ++ u32 serial; ++ u8 mfg_week; ++ u8 mfg_year; ++} __attribute__((packed)); ++ ++struct 
bdb_lvds_lfp_data_entry { ++ struct lvds_fp_timing fp_timing; ++ struct lvds_dvo_timing dvo_timing; ++ struct lvds_pnp_id pnp_id; ++} __attribute__((packed)); ++ ++struct bdb_lvds_lfp_data { ++ struct bdb_lvds_lfp_data_entry data[16]; ++} __attribute__((packed)); ++ ++struct aimdb_header { ++ char signature[16]; ++ char oem_device[20]; ++ u16 aimdb_version; ++ u16 aimdb_header_size; ++ u16 aimdb_size; ++} __attribute__((packed)); ++ ++struct aimdb_block { ++ u8 aimdb_id; ++ u16 aimdb_size; ++} __attribute__((packed)); ++ ++struct vch_panel_data { ++ u16 fp_timing_offset; ++ u8 fp_timing_size; ++ u16 dvo_timing_offset; ++ u8 dvo_timing_size; ++ u16 text_fitting_offset; ++ u8 text_fitting_size; ++ u16 graphics_fitting_offset; ++ u8 graphics_fitting_size; ++} __attribute__((packed)); ++ ++struct vch_bdb_22 { ++ struct aimdb_block aimdb_block; ++ struct vch_panel_data panels[16]; ++} __attribute__((packed)); ++ ++bool intel_init_bios(struct drm_device *dev); ++ ++/* ++ * Driver<->VBIOS interaction occurs through scratch bits in ++ * GR18 & SWF*. 
++ */ ++ ++/* GR18 bits are set on display switch and hotkey events */ ++#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */ ++#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */ ++#define GR18_HK_NONE (0x0<<3) ++#define GR18_HK_LFP_STRETCH (0x1<<3) ++#define GR18_HK_TOGGLE_DISP (0x2<<3) ++#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */ ++#define GR18_HK_POPUP_DISABLED (0x6<<3) ++#define GR18_HK_POPUP_ENABLED (0x7<<3) ++#define GR18_HK_PFIT (0x8<<3) ++#define GR18_HK_APM_CHANGE (0xa<<3) ++#define GR18_HK_MULTIPLE (0xc<<3) ++#define GR18_USER_INT_EN (1<<2) ++#define GR18_A0000_FLUSH_EN (1<<1) ++#define GR18_SMM_EN (1<<0) ++ ++/* Set by driver, cleared by VBIOS */ ++#define SWF00_YRES_SHIFT 16 ++#define SWF00_XRES_SHIFT 0 ++#define SWF00_RES_MASK 0xffff ++ ++/* Set by VBIOS at boot time and driver at runtime */ ++#define SWF01_TV2_FORMAT_SHIFT 8 ++#define SWF01_TV1_FORMAT_SHIFT 0 ++#define SWF01_TV_FORMAT_MASK 0xffff ++ ++#define SWF10_VBIOS_BLC_I2C_EN (1<<29) ++#define SWF10_GTT_OVERRIDE_EN (1<<28) ++#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */ ++#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24) ++#define SWF10_OLD_TOGGLE 0x0 ++#define SWF10_TOGGLE_LIST_1 0x1 ++#define SWF10_TOGGLE_LIST_2 0x2 ++#define SWF10_TOGGLE_LIST_3 0x3 ++#define SWF10_TOGGLE_LIST_4 0x4 ++#define SWF10_PANNING_EN (1<<23) ++#define SWF10_DRIVER_LOADED (1<<22) ++#define SWF10_EXTENDED_DESKTOP (1<<21) ++#define SWF10_EXCLUSIVE_MODE (1<<20) ++#define SWF10_OVERLAY_EN (1<<19) ++#define SWF10_PLANEB_HOLDOFF (1<<18) ++#define SWF10_PLANEA_HOLDOFF (1<<17) ++#define SWF10_VGA_HOLDOFF (1<<16) ++#define SWF10_ACTIVE_DISP_MASK 0xffff ++#define SWF10_PIPEB_LFP2 (1<<15) ++#define SWF10_PIPEB_EFP2 (1<<14) ++#define SWF10_PIPEB_TV2 (1<<13) ++#define SWF10_PIPEB_CRT2 (1<<12) ++#define SWF10_PIPEB_LFP (1<<11) ++#define SWF10_PIPEB_EFP (1<<10) ++#define SWF10_PIPEB_TV (1<<9) ++#define SWF10_PIPEB_CRT (1<<8) ++#define 
SWF10_PIPEA_LFP2 (1<<7) ++#define SWF10_PIPEA_EFP2 (1<<6) ++#define SWF10_PIPEA_TV2 (1<<5) ++#define SWF10_PIPEA_CRT2 (1<<4) ++#define SWF10_PIPEA_LFP (1<<3) ++#define SWF10_PIPEA_EFP (1<<2) ++#define SWF10_PIPEA_TV (1<<1) ++#define SWF10_PIPEA_CRT (1<<0) ++ ++#define SWF11_MEMORY_SIZE_SHIFT 16 ++#define SWF11_SV_TEST_EN (1<<15) ++#define SWF11_IS_AGP (1<<14) ++#define SWF11_DISPLAY_HOLDOFF (1<<13) ++#define SWF11_DPMS_REDUCED (1<<12) ++#define SWF11_IS_VBE_MODE (1<<11) ++#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */ ++#define SWF11_DPMS_MASK 0x07 ++#define SWF11_DPMS_OFF (1<<2) ++#define SWF11_DPMS_SUSPEND (1<<1) ++#define SWF11_DPMS_STANDBY (1<<0) ++#define SWF11_DPMS_ON 0 ++ ++#define SWF14_GFX_PFIT_EN (1<<31) ++#define SWF14_TEXT_PFIT_EN (1<<30) ++#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */ ++#define SWF14_POPUP_EN (1<<28) ++#define SWF14_DISPLAY_HOLDOFF (1<<27) ++#define SWF14_DISP_DETECT_EN (1<<26) ++#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */ ++#define SWF14_DRIVER_STATUS (1<<24) ++#define SWF14_OS_TYPE_WIN9X (1<<23) ++#define SWF14_OS_TYPE_WINNT (1<<22) ++/* 21:19 rsvd */ ++#define SWF14_PM_TYPE_MASK 0x00070000 ++#define SWF14_PM_ACPI_VIDEO (0x4 << 16) ++#define SWF14_PM_ACPI (0x3 << 16) ++#define SWF14_PM_APM_12 (0x2 << 16) ++#define SWF14_PM_APM_11 (0x1 << 16) ++#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */ ++ /* if GR18 indicates a display switch */ ++#define SWF14_DS_PIPEB_LFP2_EN (1<<15) ++#define SWF14_DS_PIPEB_EFP2_EN (1<<14) ++#define SWF14_DS_PIPEB_TV2_EN (1<<13) ++#define SWF14_DS_PIPEB_CRT2_EN (1<<12) ++#define SWF14_DS_PIPEB_LFP_EN (1<<11) ++#define SWF14_DS_PIPEB_EFP_EN (1<<10) ++#define SWF14_DS_PIPEB_TV_EN (1<<9) ++#define SWF14_DS_PIPEB_CRT_EN (1<<8) ++#define SWF14_DS_PIPEA_LFP2_EN (1<<7) ++#define SWF14_DS_PIPEA_EFP2_EN (1<<6) ++#define SWF14_DS_PIPEA_TV2_EN (1<<5) ++#define SWF14_DS_PIPEA_CRT2_EN (1<<4) ++#define SWF14_DS_PIPEA_LFP_EN (1<<3) 
++#define SWF14_DS_PIPEA_EFP_EN (1<<2) ++#define SWF14_DS_PIPEA_TV_EN (1<<1) ++#define SWF14_DS_PIPEA_CRT_EN (1<<0) ++ /* if GR18 indicates a panel fitting request */ ++#define SWF14_PFIT_EN (1<<0) /* 0 means disable */ ++ /* if GR18 indicates an APM change request */ ++#define SWF14_APM_HIBERNATE 0x4 ++#define SWF14_APM_SUSPEND 0x3 ++#define SWF14_APM_STANDBY 0x1 ++#define SWF14_APM_RESTORE 0x0 ++ ++#endif /* _I830_BIOS_H_ */ +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c +new file mode 100644 +index 0000000..5d9c94e +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_crt.c +@@ -0,0 +1,284 @@ ++/* ++ * Copyright © 2006-2007 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Eric Anholt ++ */ ++ ++#include ++#include "drmP.h" ++#include "drm.h" ++#include "drm_crtc.h" ++#include "drm_crtc_helper.h" ++#include "intel_drv.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++static void intel_crt_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 temp; ++ ++ temp = I915_READ(ADPA); ++ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); ++ temp &= ~ADPA_DAC_ENABLE; ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ temp |= ADPA_DAC_ENABLE; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE; ++ break; ++ case DRM_MODE_DPMS_SUSPEND: ++ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE; ++ break; ++ case DRM_MODE_DPMS_OFF: ++ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; ++ break; ++ } ++ ++ I915_WRITE(ADPA, temp); ++} ++ ++static int intel_crt_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ return MODE_NO_DBLESCAN; ++ ++ if (mode->clock > 400000 || mode->clock < 25000) ++ return MODE_CLOCK_RANGE; ++ ++ return MODE_OK; ++} ++ ++static bool intel_crt_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ return true; ++} ++ ++static void intel_crt_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ ++ struct drm_device *dev = encoder->dev; ++ struct drm_crtc *crtc = encoder->crtc; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int dpll_md_reg; ++ u32 adpa, dpll_md; ++ ++ if (intel_crtc->pipe == 0) ++ dpll_md_reg = DPLL_A_MD; ++ else ++ dpll_md_reg = DPLL_B_MD; ++ ++ /* ++ * Disable separate mode multiplier used when cloning SDVO to CRT ++ * XXX this needs to be 
adjusted when we really are cloning ++ */ ++ if (IS_I965G(dev)) { ++ dpll_md = I915_READ(dpll_md_reg); ++ I915_WRITE(dpll_md_reg, ++ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); ++ } ++ ++ adpa = 0; ++ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ++ adpa |= ADPA_HSYNC_ACTIVE_HIGH; ++ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ++ adpa |= ADPA_VSYNC_ACTIVE_HIGH; ++ ++ if (intel_crtc->pipe == 0) ++ adpa |= ADPA_PIPE_A_SELECT; ++ else ++ adpa |= ADPA_PIPE_B_SELECT; ++ ++ I915_WRITE(ADPA, adpa); ++} ++ ++/** ++ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence. ++ * ++ * Not for i915G/i915GM ++ * ++ * \return true if CRT is connected. ++ * \return false if CRT is disconnected. ++ */ ++static bool intel_crt_detect_hotplug(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 temp; ++ ++ unsigned long timeout = jiffies + msecs_to_jiffies(1000); ++ ++ temp = I915_READ(PORT_HOTPLUG_EN); ++ ++ I915_WRITE(PORT_HOTPLUG_EN, ++ temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5)); ++ ++ do { ++ if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) ++ break; ++ msleep(1); ++ } while (time_after(timeout, jiffies)); ++ ++ if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == ++ CRT_HOTPLUG_MONITOR_COLOR) ++ return true; ++ ++ return false; ++} ++ ++static bool intel_crt_detect_ddc(struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ /* CRT should always be at 0, but check anyway */ ++ if (intel_output->type != INTEL_OUTPUT_ANALOG) ++ return false; ++ ++ return intel_ddc_probe(intel_output); ++} ++ ++static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ ++ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { ++ if (intel_crt_detect_hotplug(connector)) ++ return connector_status_connected; ++ else ++ return 
connector_status_disconnected; ++ } ++ ++ if (intel_crt_detect_ddc(connector)) ++ return connector_status_connected; ++ ++ /* TODO use load detect */ ++ return connector_status_unknown; ++} ++ ++static void intel_crt_destroy(struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ intel_i2c_destroy(intel_output->ddc_bus); ++ drm_sysfs_connector_remove(connector); ++ drm_connector_cleanup(connector); ++ kfree(connector); ++} ++ ++static int intel_crt_get_modes(struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ return intel_ddc_get_modes(intel_output); ++} ++ ++static int intel_crt_set_property(struct drm_connector *connector, ++ struct drm_property *property, ++ uint64_t value) ++{ ++ struct drm_device *dev = connector->dev; ++ ++ if (property == dev->mode_config.dpms_property && connector->encoder) ++ intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf)); ++ ++ return 0; ++} ++ ++/* ++ * Routines for controlling stuff on the analog port ++ */ ++ ++static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { ++ .dpms = intel_crt_dpms, ++ .mode_fixup = intel_crt_mode_fixup, ++ .prepare = intel_encoder_prepare, ++ .commit = intel_encoder_commit, ++ .mode_set = intel_crt_mode_set, ++}; ++ ++static const struct drm_connector_funcs intel_crt_connector_funcs = { ++ .detect = intel_crt_detect, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .destroy = intel_crt_destroy, ++ .set_property = intel_crt_set_property, ++}; ++ ++static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { ++ .mode_valid = intel_crt_mode_valid, ++ .get_modes = intel_crt_get_modes, ++ .best_encoder = intel_best_encoder, ++}; ++ ++void intel_crt_enc_destroy(struct drm_encoder *encoder) ++{ ++ drm_encoder_cleanup(encoder); ++} ++ ++static const struct drm_encoder_funcs intel_crt_enc_funcs = { ++ .destroy = intel_crt_enc_destroy, ++}; ++ 
++void intel_crt_init(struct drm_device *dev) ++{ ++ struct drm_connector *connector; ++ struct intel_output *intel_output; ++ ++ intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); ++ if (!intel_output) ++ return; ++ ++ connector = &intel_output->base; ++ drm_connector_init(dev, &intel_output->base, ++ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); ++ ++ drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, ++ DRM_MODE_ENCODER_DAC); ++ ++ drm_mode_connector_attach_encoder(&intel_output->base, ++ &intel_output->enc); ++ ++ /* Set up the DDC bus. */ ++ intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A"); ++ if (!intel_output->ddc_bus) { ++ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " ++ "failed.\n"); ++ return; ++ } ++ ++ intel_output->type = INTEL_OUTPUT_ANALOG; ++ connector->interlace_allowed = 0; ++ connector->doublescan_allowed = 0; ++ ++ drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); ++ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); ++ ++ drm_sysfs_connector_add(connector); ++} +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +new file mode 100644 +index 0000000..5689e44 +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -0,0 +1,1590 @@ ++/* ++ * Copyright © 2006-2007 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the 
++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ */ ++ ++#include ++#include "drmP.h" ++#include "intel_drv.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#include "drm_crtc_helper.h" ++ ++bool intel_pipe_has_type (struct drm_crtc *crtc, int type); ++ ++typedef struct { ++ /* given values */ ++ int n; ++ int m1, m2; ++ int p1, p2; ++ /* derived values */ ++ int dot; ++ int vco; ++ int m; ++ int p; ++} intel_clock_t; ++ ++typedef struct { ++ int min, max; ++} intel_range_t; ++ ++typedef struct { ++ int dot_limit; ++ int p2_slow, p2_fast; ++} intel_p2_t; ++ ++#define INTEL_P2_NUM 2 ++ ++typedef struct { ++ intel_range_t dot, vco, n, m, m1, m2, p, p1; ++ intel_p2_t p2; ++} intel_limit_t; ++ ++#define I8XX_DOT_MIN 25000 ++#define I8XX_DOT_MAX 350000 ++#define I8XX_VCO_MIN 930000 ++#define I8XX_VCO_MAX 1400000 ++#define I8XX_N_MIN 3 ++#define I8XX_N_MAX 16 ++#define I8XX_M_MIN 96 ++#define I8XX_M_MAX 140 ++#define I8XX_M1_MIN 18 ++#define I8XX_M1_MAX 26 ++#define I8XX_M2_MIN 6 ++#define I8XX_M2_MAX 16 ++#define I8XX_P_MIN 4 ++#define I8XX_P_MAX 128 ++#define I8XX_P1_MIN 2 ++#define I8XX_P1_MAX 33 ++#define I8XX_P1_LVDS_MIN 1 ++#define I8XX_P1_LVDS_MAX 6 ++#define I8XX_P2_SLOW 4 ++#define I8XX_P2_FAST 2 ++#define I8XX_P2_LVDS_SLOW 14 ++#define I8XX_P2_LVDS_FAST 14 /* No fast option */ ++#define I8XX_P2_SLOW_LIMIT 165000 ++ ++#define I9XX_DOT_MIN 20000 ++#define I9XX_DOT_MAX 400000 ++#define I9XX_VCO_MIN 1400000 ++#define I9XX_VCO_MAX 2800000 ++#define I9XX_N_MIN 3 
++#define I9XX_N_MAX 8 ++#define I9XX_M_MIN 70 ++#define I9XX_M_MAX 120 ++#define I9XX_M1_MIN 10 ++#define I9XX_M1_MAX 20 ++#define I9XX_M2_MIN 5 ++#define I9XX_M2_MAX 9 ++#define I9XX_P_SDVO_DAC_MIN 5 ++#define I9XX_P_SDVO_DAC_MAX 80 ++#define I9XX_P_LVDS_MIN 7 ++#define I9XX_P_LVDS_MAX 98 ++#define I9XX_P1_MIN 1 ++#define I9XX_P1_MAX 8 ++#define I9XX_P2_SDVO_DAC_SLOW 10 ++#define I9XX_P2_SDVO_DAC_FAST 5 ++#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 ++#define I9XX_P2_LVDS_SLOW 14 ++#define I9XX_P2_LVDS_FAST 7 ++#define I9XX_P2_LVDS_SLOW_LIMIT 112000 ++ ++#define INTEL_LIMIT_I8XX_DVO_DAC 0 ++#define INTEL_LIMIT_I8XX_LVDS 1 ++#define INTEL_LIMIT_I9XX_SDVO_DAC 2 ++#define INTEL_LIMIT_I9XX_LVDS 3 ++ ++static const intel_limit_t intel_limits[] = { ++ { /* INTEL_LIMIT_I8XX_DVO_DAC */ ++ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, ++ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, ++ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, ++ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, ++ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, ++ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, ++ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, ++ .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, ++ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, ++ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, ++ }, ++ { /* INTEL_LIMIT_I8XX_LVDS */ ++ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, ++ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, ++ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, ++ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, ++ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, ++ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, ++ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, ++ .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, ++ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, ++ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, ++ }, ++ { /* INTEL_LIMIT_I9XX_SDVO_DAC */ ++ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, ++ .vco = { 
.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, ++ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, ++ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, ++ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, ++ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, ++ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, ++ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, ++ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, ++ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, ++ }, ++ { /* INTEL_LIMIT_I9XX_LVDS */ ++ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, ++ .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, ++ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, ++ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, ++ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, ++ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, ++ .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX }, ++ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, ++ /* The single-channel range is 25-112Mhz, and dual-channel ++ * is 80-224Mhz. Prefer single channel as much as possible. ++ */ ++ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, ++ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, ++ }, ++}; ++ ++static const intel_limit_t *intel_limit(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ const intel_limit_t *limit; ++ ++ if (IS_I9XX(dev)) { ++ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) ++ limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; ++ else ++ limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; ++ } else { ++ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) ++ limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; ++ else ++ limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; ++ } ++ return limit; ++} ++ ++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. 
*/ ++ ++static void i8xx_clock(int refclk, intel_clock_t *clock) ++{ ++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); ++ clock->p = clock->p1 * clock->p2; ++ clock->vco = refclk * clock->m / (clock->n + 2); ++ clock->dot = clock->vco / clock->p; ++} ++ ++/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */ ++ ++static void i9xx_clock(int refclk, intel_clock_t *clock) ++{ ++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); ++ clock->p = clock->p1 * clock->p2; ++ clock->vco = refclk * clock->m / (clock->n + 2); ++ clock->dot = clock->vco / clock->p; ++} ++ ++static void intel_clock(struct drm_device *dev, int refclk, ++ intel_clock_t *clock) ++{ ++ if (IS_I9XX(dev)) ++ return i9xx_clock (refclk, clock); ++ else ++ return i8xx_clock (refclk, clock); ++} ++ ++/** ++ * Returns whether any output on the specified pipe is of the specified type ++ */ ++bool intel_pipe_has_type (struct drm_crtc *crtc, int type) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_mode_config *mode_config = &dev->mode_config; ++ struct drm_connector *l_entry; ++ ++ list_for_each_entry(l_entry, &mode_config->connector_list, head) { ++ if (l_entry->encoder && ++ l_entry->encoder->crtc == crtc) { ++ struct intel_output *intel_output = to_intel_output(l_entry); ++ if (intel_output->type == type) ++ return true; ++ } ++ } ++ return false; ++} ++ ++#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } ++/** ++ * Returns whether the given set of divisors are valid for a given refclk with ++ * the given connectors. 
++ */ ++ ++static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) ++{ ++ const intel_limit_t *limit = intel_limit (crtc); ++ ++ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) ++ INTELPllInvalid ("p1 out of range\n"); ++ if (clock->p < limit->p.min || limit->p.max < clock->p) ++ INTELPllInvalid ("p out of range\n"); ++ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) ++ INTELPllInvalid ("m2 out of range\n"); ++ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) ++ INTELPllInvalid ("m1 out of range\n"); ++ if (clock->m1 <= clock->m2) ++ INTELPllInvalid ("m1 <= m2\n"); ++ if (clock->m < limit->m.min || limit->m.max < clock->m) ++ INTELPllInvalid ("m out of range\n"); ++ if (clock->n < limit->n.min || limit->n.max < clock->n) ++ INTELPllInvalid ("n out of range\n"); ++ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) ++ INTELPllInvalid ("vco out of range\n"); ++ /* XXX: We may need to be checking "Dot clock" depending on the multiplier, ++ * connector, etc., rather than just a single range. ++ */ ++ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) ++ INTELPllInvalid ("dot out of range\n"); ++ ++ return true; ++} ++ ++/** ++ * Returns a set of divisors for the desired target clock with the given ++ * refclk, or FALSE. The returned values represent the clock equation: ++ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. ++ */ ++static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, ++ int refclk, intel_clock_t *best_clock) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ intel_clock_t clock; ++ const intel_limit_t *limit = intel_limit(crtc); ++ int err = target; ++ ++ if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && ++ (I915_READ(LVDS) & LVDS_PORT_EN) != 0) { ++ /* ++ * For LVDS, if the panel is on, just rely on its current ++ * settings for dual-channel. 
We haven't figured out how to ++ * reliably set up different single/dual channel state, if we ++ * even can. ++ */ ++ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == ++ LVDS_CLKB_POWER_UP) ++ clock.p2 = limit->p2.p2_fast; ++ else ++ clock.p2 = limit->p2.p2_slow; ++ } else { ++ if (target < limit->p2.dot_limit) ++ clock.p2 = limit->p2.p2_slow; ++ else ++ clock.p2 = limit->p2.p2_fast; ++ } ++ ++ memset (best_clock, 0, sizeof (*best_clock)); ++ ++ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { ++ for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && ++ clock.m2 <= limit->m2.max; clock.m2++) { ++ for (clock.n = limit->n.min; clock.n <= limit->n.max; ++ clock.n++) { ++ for (clock.p1 = limit->p1.min; ++ clock.p1 <= limit->p1.max; clock.p1++) { ++ int this_err; ++ ++ intel_clock(dev, refclk, &clock); ++ ++ if (!intel_PLL_is_valid(crtc, &clock)) ++ continue; ++ ++ this_err = abs(clock.dot - target); ++ if (this_err < err) { ++ *best_clock = clock; ++ err = this_err; ++ } ++ } ++ } ++ } ++ } ++ ++ return (err != target); ++} ++ ++void ++intel_wait_for_vblank(struct drm_device *dev) ++{ ++ /* Wait for 20ms, i.e. one cycle at 50hz. */ ++ udelay(20000); ++} ++ ++void ++intel_pipe_set_base(struct drm_crtc *crtc, int x, int y) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_i915_master_private *master_priv; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ struct intel_framebuffer *intel_fb; ++ struct drm_i915_gem_object *obj_priv; ++ struct drm_gem_object *obj; ++ int pipe = intel_crtc->pipe; ++ unsigned long Start, Offset; ++ int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); ++ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); ++ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; ++ int dspcntr_reg = (pipe == 0) ? 
DSPACNTR : DSPBCNTR; ++ u32 dspcntr; ++ ++ /* no fb bound */ ++ if (!crtc->fb) { ++ DRM_DEBUG("No FB bound\n"); ++ return; ++ } ++ ++ intel_fb = to_intel_framebuffer(crtc->fb); ++ ++ obj = intel_fb->obj; ++ obj_priv = obj->driver_private; ++ ++ Start = obj_priv->gtt_offset; ++ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); ++ ++ I915_WRITE(dspstride, crtc->fb->pitch); ++ ++ dspcntr = I915_READ(dspcntr_reg); ++ switch (crtc->fb->bits_per_pixel) { ++ case 8: ++ dspcntr |= DISPPLANE_8BPP; ++ break; ++ case 16: ++ if (crtc->fb->depth == 15) ++ dspcntr |= DISPPLANE_15_16BPP; ++ else ++ dspcntr |= DISPPLANE_16BPP; ++ break; ++ case 24: ++ case 32: ++ dspcntr |= DISPPLANE_32BPP_NO_ALPHA; ++ break; ++ default: ++ DRM_ERROR("Unknown color depth\n"); ++ return; ++ } ++ I915_WRITE(dspcntr_reg, dspcntr); ++ ++ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); ++ if (IS_I965G(dev)) { ++ I915_WRITE(dspbase, Offset); ++ I915_READ(dspbase); ++ I915_WRITE(dspsurf, Start); ++ I915_READ(dspsurf); ++ } else { ++ I915_WRITE(dspbase, Start + Offset); ++ I915_READ(dspbase); ++ } ++ ++ ++ if (!dev->primary->master) ++ return; ++ ++ master_priv = dev->primary->master->driver_priv; ++ if (!master_priv->sarea_priv) ++ return; ++ ++ switch (pipe) { ++ case 0: ++ master_priv->sarea_priv->pipeA_x = x; ++ master_priv->sarea_priv->pipeA_y = y; ++ break; ++ case 1: ++ master_priv->sarea_priv->pipeB_x = x; ++ master_priv->sarea_priv->pipeB_y = y; ++ break; ++ default: ++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); ++ break; ++ } ++} ++ ++ ++ ++/** ++ * Sets the power management mode of the pipe and plane. ++ * ++ * This code should probably grow support for turning the cursor off and back ++ * on appropriately at the same time as we're turning the pipe off/on. 
++ */ ++static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_master_private *master_priv; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int pipe = intel_crtc->pipe; ++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; ++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; ++ int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; ++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; ++ u32 temp; ++ bool enabled; ++ ++ /* XXX: When our outputs are all unaware of DPMS modes other than off ++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. ++ */ ++ switch (mode) { ++ case DRM_MODE_DPMS_ON: ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ /* Enable the DPLL */ ++ temp = I915_READ(dpll_reg); ++ if ((temp & DPLL_VCO_ENABLE) == 0) { ++ I915_WRITE(dpll_reg, temp); ++ I915_READ(dpll_reg); ++ /* Wait for the clocks to stabilize. */ ++ udelay(150); ++ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); ++ I915_READ(dpll_reg); ++ /* Wait for the clocks to stabilize. */ ++ udelay(150); ++ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); ++ I915_READ(dpll_reg); ++ /* Wait for the clocks to stabilize. 
*/ ++ udelay(150); ++ } ++ ++ /* Enable the pipe */ ++ temp = I915_READ(pipeconf_reg); ++ if ((temp & PIPEACONF_ENABLE) == 0) ++ I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); ++ ++ /* Enable the plane */ ++ temp = I915_READ(dspcntr_reg); ++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) { ++ I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); ++ /* Flush the plane changes */ ++ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); ++ } ++ ++ intel_crtc_load_lut(crtc); ++ ++ /* Give the overlay scaler a chance to enable if it's on this pipe */ ++ //intel_crtc_dpms_video(crtc, true); TODO ++ break; ++ case DRM_MODE_DPMS_OFF: ++ /* Give the overlay scaler a chance to disable if it's on this pipe */ ++ //intel_crtc_dpms_video(crtc, FALSE); TODO ++ ++ /* Disable the VGA plane that we never use */ ++ I915_WRITE(VGACNTRL, VGA_DISP_DISABLE); ++ ++ /* Disable display plane */ ++ temp = I915_READ(dspcntr_reg); ++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) { ++ I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); ++ /* Flush the plane changes */ ++ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); ++ I915_READ(dspbase_reg); ++ } ++ ++ if (!IS_I9XX(dev)) { ++ /* Wait for vblank for the disable to take effect */ ++ intel_wait_for_vblank(dev); ++ } ++ ++ /* Next, disable display pipes */ ++ temp = I915_READ(pipeconf_reg); ++ if ((temp & PIPEACONF_ENABLE) != 0) { ++ I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); ++ I915_READ(pipeconf_reg); ++ } ++ ++ /* Wait for vblank for the disable to take effect. */ ++ intel_wait_for_vblank(dev); ++ ++ temp = I915_READ(dpll_reg); ++ if ((temp & DPLL_VCO_ENABLE) != 0) { ++ I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); ++ I915_READ(dpll_reg); ++ } ++ ++ /* Wait for the clocks to turn off. 
*/ ++ udelay(150); ++ break; ++ } ++ ++ if (!dev->primary->master) ++ return; ++ ++ master_priv = dev->primary->master->driver_priv; ++ if (!master_priv->sarea_priv) ++ return; ++ ++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; ++ ++ switch (pipe) { ++ case 0: ++ master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; ++ master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; ++ break; ++ case 1: ++ master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; ++ master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; ++ break; ++ default: ++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); ++ break; ++ } ++ ++ intel_crtc->dpms_mode = mode; ++} ++ ++static void intel_crtc_prepare (struct drm_crtc *crtc) ++{ ++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; ++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); ++} ++ ++static void intel_crtc_commit (struct drm_crtc *crtc) ++{ ++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; ++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); ++} ++ ++void intel_encoder_prepare (struct drm_encoder *encoder) ++{ ++ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; ++ /* lvds has its own version of prepare see intel_lvds_prepare */ ++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++void intel_encoder_commit (struct drm_encoder *encoder) ++{ ++ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; ++ /* lvds has its own version of commit see intel_lvds_commit */ ++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); ++} ++ ++static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ return true; ++} ++ ++ ++/** Returns the core display clock speed for i830 - i945 */ ++static int intel_get_core_clock_speed(struct drm_device *dev) ++{ ++ ++ /* Core clock values taken from the published datasheets. 
++ * The 830 may go up to 166 Mhz, which we should check. ++ */ ++ if (IS_I945G(dev)) ++ return 400000; ++ else if (IS_I915G(dev)) ++ return 333000; ++ else if (IS_I945GM(dev) || IS_845G(dev)) ++ return 200000; ++ else if (IS_I915GM(dev)) { ++ u16 gcfgc = 0; ++ ++ pci_read_config_word(dev->pdev, GCFGC, &gcfgc); ++ ++ if (gcfgc & GC_LOW_FREQUENCY_ENABLE) ++ return 133000; ++ else { ++ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { ++ case GC_DISPLAY_CLOCK_333_MHZ: ++ return 333000; ++ default: ++ case GC_DISPLAY_CLOCK_190_200_MHZ: ++ return 190000; ++ } ++ } ++ } else if (IS_I865G(dev)) ++ return 266000; ++ else if (IS_I855(dev)) { ++ u16 hpllcc = 0; ++ /* Assume that the hardware is in the high speed state. This ++ * should be the default. ++ */ ++ switch (hpllcc & GC_CLOCK_CONTROL_MASK) { ++ case GC_CLOCK_133_200: ++ case GC_CLOCK_100_200: ++ return 200000; ++ case GC_CLOCK_166_250: ++ return 250000; ++ case GC_CLOCK_100_133: ++ return 133000; ++ } ++ } else /* 852, 830 */ ++ return 133000; ++ ++ return 0; /* Silence gcc warning */ ++} ++ ++ ++/** ++ * Return the pipe currently connected to the panel fitter, ++ * or -1 if the panel fitter is not present or not in use ++ */ ++static int intel_panel_fitter_pipe (struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 pfit_control; ++ ++ /* i830 doesn't have a panel fitter */ ++ if (IS_I830(dev)) ++ return -1; ++ ++ pfit_control = I915_READ(PFIT_CONTROL); ++ ++ /* See if the panel fitter is in use */ ++ if ((pfit_control & PFIT_ENABLE) == 0) ++ return -1; ++ ++ /* 965 can place panel fitter on either pipe */ ++ if (IS_I965G(dev)) ++ return (pfit_control >> 29) & 0x3; ++ ++ /* older chips can only use pipe 1 */ ++ return 1; ++} ++ ++static void intel_crtc_mode_set(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode, ++ int x, int y) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct 
intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int pipe = intel_crtc->pipe; ++ int fp_reg = (pipe == 0) ? FPA0 : FPB0; ++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; ++ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; ++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; ++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; ++ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; ++ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; ++ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; ++ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; ++ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; ++ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; ++ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; ++ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; ++ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; ++ int refclk; ++ intel_clock_t clock; ++ u32 dpll = 0, fp = 0, dspcntr, pipeconf; ++ bool ok, is_sdvo = false, is_dvo = false; ++ bool is_crt = false, is_lvds = false, is_tv = false; ++ struct drm_mode_config *mode_config = &dev->mode_config; ++ struct drm_connector *connector; ++ ++ drm_vblank_pre_modeset(dev, pipe); ++ ++ list_for_each_entry(connector, &mode_config->connector_list, head) { ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ if (!connector->encoder || connector->encoder->crtc != crtc) ++ continue; ++ ++ switch (intel_output->type) { ++ case INTEL_OUTPUT_LVDS: ++ is_lvds = true; ++ break; ++ case INTEL_OUTPUT_SDVO: ++ is_sdvo = true; ++ break; ++ case INTEL_OUTPUT_DVO: ++ is_dvo = true; ++ break; ++ case INTEL_OUTPUT_TVOUT: ++ is_tv = true; ++ break; ++ case INTEL_OUTPUT_ANALOG: ++ is_crt = true; ++ break; ++ } ++ } ++ ++ if (IS_I9XX(dev)) { ++ refclk = 96000; ++ } else { ++ refclk = 48000; ++ } ++ ++ ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); ++ if (!ok) { ++ DRM_ERROR("Couldn't find PLL settings for mode!\n"); ++ return; ++ } ++ ++ fp = clock.n << 16 | clock.m1 << 8 | 
clock.m2; ++ ++ dpll = DPLL_VGA_MODE_DIS; ++ if (IS_I9XX(dev)) { ++ if (is_lvds) ++ dpll |= DPLLB_MODE_LVDS; ++ else ++ dpll |= DPLLB_MODE_DAC_SERIAL; ++ if (is_sdvo) { ++ dpll |= DPLL_DVO_HIGH_SPEED; ++ if (IS_I945G(dev) || IS_I945GM(dev)) { ++ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; ++ dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; ++ } ++ } ++ ++ /* compute bitmask from p1 value */ ++ dpll |= (1 << (clock.p1 - 1)) << 16; ++ switch (clock.p2) { ++ case 5: ++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; ++ break; ++ case 7: ++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; ++ break; ++ case 10: ++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; ++ break; ++ case 14: ++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; ++ break; ++ } ++ if (IS_I965G(dev)) ++ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); ++ } else { ++ if (is_lvds) { ++ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; ++ } else { ++ if (clock.p1 == 2) ++ dpll |= PLL_P1_DIVIDE_BY_TWO; ++ else ++ dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; ++ if (clock.p2 == 4) ++ dpll |= PLL_P2_DIVIDE_BY_4; ++ } ++ } ++ ++ if (is_tv) { ++ /* XXX: just matching BIOS for now */ ++/* dpll |= PLL_REF_INPUT_TVCLKINBC; */ ++ dpll |= 3; ++ } ++ else ++ dpll |= PLL_REF_INPUT_DREFCLK; ++ ++ /* setup pipeconf */ ++ pipeconf = I915_READ(pipeconf_reg); ++ ++ /* Set up the display plane register */ ++ dspcntr = DISPPLANE_GAMMA_ENABLE; ++ ++ if (pipe == 0) ++ dspcntr |= DISPPLANE_SEL_PIPE_A; ++ else ++ dspcntr |= DISPPLANE_SEL_PIPE_B; ++ ++ if (pipe == 0 && !IS_I965G(dev)) { ++ /* Enable pixel doubling when the dot clock is > 90% of the (display) ++ * core speed. ++ * ++ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the ++ * pipe == 0 check? 
++ */ ++ if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10) ++ pipeconf |= PIPEACONF_DOUBLE_WIDE; ++ else ++ pipeconf &= ~PIPEACONF_DOUBLE_WIDE; ++ } ++ ++ dspcntr |= DISPLAY_PLANE_ENABLE; ++ pipeconf |= PIPEACONF_ENABLE; ++ dpll |= DPLL_VCO_ENABLE; ++ ++ ++ /* Disable the panel fitter if it was on our pipe */ ++ if (intel_panel_fitter_pipe(dev) == pipe) ++ I915_WRITE(PFIT_CONTROL, 0); ++ ++ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); ++ drm_mode_debug_printmodeline(mode); ++ ++ ++ if (dpll & DPLL_VCO_ENABLE) { ++ I915_WRITE(fp_reg, fp); ++ I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); ++ I915_READ(dpll_reg); ++ udelay(150); ++ } ++ ++ /* The LVDS pin pair needs to be on before the DPLLs are enabled. ++ * This is an exception to the general rule that mode_set doesn't turn ++ * things on. ++ */ ++ if (is_lvds) { ++ u32 lvds = I915_READ(LVDS); ++ ++ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; ++ /* Set the B0-B3 data pairs corresponding to whether we're going to ++ * set the DPLLs for dual-channel mode or not. ++ */ ++ if (clock.p2 == 7) ++ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; ++ else ++ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); ++ ++ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) ++ * appropriately here, but we need to look more thoroughly into how ++ * panels behave in the two modes. ++ */ ++ ++ I915_WRITE(LVDS, lvds); ++ I915_READ(LVDS); ++ } ++ ++ I915_WRITE(fp_reg, fp); ++ I915_WRITE(dpll_reg, dpll); ++ I915_READ(dpll_reg); ++ /* Wait for the clocks to stabilize. */ ++ udelay(150); ++ ++ if (IS_I965G(dev)) { ++ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; ++ I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ++ ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); ++ } else { ++ /* write it again -- the BIOS does, after all */ ++ I915_WRITE(dpll_reg, dpll); ++ } ++ I915_READ(dpll_reg); ++ /* Wait for the clocks to stabilize. 
*/ ++ udelay(150); ++ ++ I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | ++ ((adjusted_mode->crtc_htotal - 1) << 16)); ++ I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ++ ((adjusted_mode->crtc_hblank_end - 1) << 16)); ++ I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ++ ((adjusted_mode->crtc_hsync_end - 1) << 16)); ++ I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ++ ((adjusted_mode->crtc_vtotal - 1) << 16)); ++ I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ++ ((adjusted_mode->crtc_vblank_end - 1) << 16)); ++ I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ++ ((adjusted_mode->crtc_vsync_end - 1) << 16)); ++ /* pipesrc and dspsize control the size that is scaled from, which should ++ * always be the user's requested size. ++ */ ++ I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); ++ I915_WRITE(dsppos_reg, 0); ++ I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); ++ I915_WRITE(pipeconf_reg, pipeconf); ++ I915_READ(pipeconf_reg); ++ ++ intel_wait_for_vblank(dev); ++ ++ I915_WRITE(dspcntr_reg, dspcntr); ++ ++ /* Flush the plane changes */ ++ intel_pipe_set_base(crtc, x, y); ++ ++ intel_wait_for_vblank(dev); ++ ++ drm_vblank_post_modeset(dev, pipe); ++} ++ ++/** Loads the palette/gamma unit for the CRTC with the prepared values */ ++void intel_crtc_load_lut(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; ++ int i; ++ ++ /* The clocks have to be on to load the palette. 
*/ ++ if (!crtc->enabled) ++ return; ++ ++ for (i = 0; i < 256; i++) { ++ I915_WRITE(palreg + 4 * i, ++ (intel_crtc->lut_r[i] << 16) | ++ (intel_crtc->lut_g[i] << 8) | ++ intel_crtc->lut_b[i]); ++ } ++} ++ ++static int intel_crtc_cursor_set(struct drm_crtc *crtc, ++ struct drm_file *file_priv, ++ uint32_t handle, ++ uint32_t width, uint32_t height) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ struct drm_gem_object *bo; ++ struct drm_i915_gem_object *obj_priv; ++ int pipe = intel_crtc->pipe; ++ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; ++ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; ++ uint32_t temp; ++ size_t addr; ++ ++ DRM_DEBUG("\n"); ++ ++ /* if we want to turn off the cursor ignore width and height */ ++ if (!handle) { ++ DRM_DEBUG("cursor off\n"); ++ /* turn of the cursor */ ++ temp = 0; ++ temp |= CURSOR_MODE_DISABLE; ++ ++ I915_WRITE(control, temp); ++ I915_WRITE(base, 0); ++ return 0; ++ } ++ ++ /* Currently we only support 64x64 cursors */ ++ if (width != 64 || height != 64) { ++ DRM_ERROR("we currently only support 64x64 cursors\n"); ++ return -EINVAL; ++ } ++ ++ bo = drm_gem_object_lookup(dev, file_priv, handle); ++ if (!bo) ++ return -ENOENT; ++ ++ obj_priv = bo->driver_private; ++ ++ if (bo->size < width * height * 4) { ++ DRM_ERROR("buffer is to small\n"); ++ drm_gem_object_unreference(bo); ++ return -ENOMEM; ++ } ++ ++ if (dev_priv->cursor_needs_physical) { ++ addr = dev->agp->base + obj_priv->gtt_offset; ++ } else { ++ addr = obj_priv->gtt_offset; ++ } ++ ++ intel_crtc->cursor_addr = addr; ++ temp = 0; ++ /* set the pipe for the cursor */ ++ temp |= (pipe << 28); ++ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; ++ ++ I915_WRITE(control, temp); ++ I915_WRITE(base, addr); ++ ++ return 0; ++} ++ ++static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) ++{ ++ struct drm_device *dev = crtc->dev; ++ 
struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int pipe = intel_crtc->pipe; ++ uint32_t temp = 0; ++ uint32_t adder; ++ ++ if (x < 0) { ++ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); ++ x = -x; ++ } ++ if (y < 0) { ++ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); ++ y = -y; ++ } ++ ++ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); ++ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); ++ ++ adder = intel_crtc->cursor_addr; ++ I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); ++ I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder); ++ ++ return 0; ++} ++ ++/** Sets the color ramps on behalf of RandR */ ++void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, ++ u16 blue, int regno) ++{ ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ ++ intel_crtc->lut_r[regno] = red >> 8; ++ intel_crtc->lut_g[regno] = green >> 8; ++ intel_crtc->lut_b[regno] = blue >> 8; ++} ++ ++static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, ++ u16 *blue, uint32_t size) ++{ ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int i; ++ ++ if (size != 256) ++ return; ++ ++ for (i = 0; i < 256; i++) { ++ intel_crtc->lut_r[i] = red[i] >> 8; ++ intel_crtc->lut_g[i] = green[i] >> 8; ++ intel_crtc->lut_b[i] = blue[i] >> 8; ++ } ++ ++ intel_crtc_load_lut(crtc); ++} ++ ++/** ++ * Get a pipe with a simple mode set on it for doing load-based monitor ++ * detection. ++ * ++ * It will be up to the load-detect code to adjust the pipe as appropriate for ++ * its requirements. The pipe will be connected to no other outputs. ++ * ++ * Currently this code will only succeed if there is a pipe with no outputs ++ * configured for it. In the future, it could choose to temporarily disable ++ * some outputs to free up a pipe for its use. ++ * ++ * \return crtc, or NULL if no pipes are available. 
++ */ ++ ++/* VESA 640x480x72Hz mode to set on the pipe */ ++static struct drm_display_mode load_detect_mode = { ++ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, ++ 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), ++}; ++ ++struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, ++ struct drm_display_mode *mode, ++ int *dpms_mode) ++{ ++ struct intel_crtc *intel_crtc; ++ struct drm_crtc *possible_crtc; ++ struct drm_crtc *supported_crtc =NULL; ++ struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_crtc *crtc = NULL; ++ struct drm_device *dev = encoder->dev; ++ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; ++ struct drm_crtc_helper_funcs *crtc_funcs; ++ int i = -1; ++ ++ /* ++ * Algorithm gets a little messy: ++ * - if the connector already has an assigned crtc, use it (but make ++ * sure it's on first) ++ * - try to find the first unused crtc that can drive this connector, ++ * and use that if we find one ++ * - if there are no unused crtcs available, try to use the first ++ * one we found that supports the connector ++ */ ++ ++ /* See if we already have a CRTC for this connector */ ++ if (encoder->crtc) { ++ crtc = encoder->crtc; ++ /* Make sure the crtc and connector are running */ ++ intel_crtc = to_intel_crtc(crtc); ++ *dpms_mode = intel_crtc->dpms_mode; ++ if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { ++ crtc_funcs = crtc->helper_private; ++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); ++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); ++ } ++ return crtc; ++ } ++ ++ /* Find an unused one (if possible) */ ++ list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) { ++ i++; ++ if (!(encoder->possible_crtcs & (1 << i))) ++ continue; ++ if (!possible_crtc->enabled) { ++ crtc = possible_crtc; ++ break; ++ } ++ if (!supported_crtc) ++ supported_crtc = possible_crtc; ++ } ++ ++ /* ++ * If we didn't find an unused CRTC, don't use any. 
++ */ ++ if (!crtc) { ++ return NULL; ++ } ++ ++ encoder->crtc = crtc; ++ intel_output->load_detect_temp = true; ++ ++ intel_crtc = to_intel_crtc(crtc); ++ *dpms_mode = intel_crtc->dpms_mode; ++ ++ if (!crtc->enabled) { ++ if (!mode) ++ mode = &load_detect_mode; ++ drm_crtc_helper_set_mode(crtc, mode, 0, 0); ++ } else { ++ if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { ++ crtc_funcs = crtc->helper_private; ++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); ++ } ++ ++ /* Add this connector to the crtc */ ++ encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode); ++ encoder_funcs->commit(encoder); ++ } ++ /* let the connector get through one full cycle before testing */ ++ intel_wait_for_vblank(dev); ++ ++ return crtc; ++} ++ ++void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) ++{ ++ struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_device *dev = encoder->dev; ++ struct drm_crtc *crtc = encoder->crtc; ++ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; ++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; ++ ++ if (intel_output->load_detect_temp) { ++ encoder->crtc = NULL; ++ intel_output->load_detect_temp = false; ++ crtc->enabled = drm_helper_crtc_in_use(crtc); ++ drm_helper_disable_unused_functions(dev); ++ } ++ ++ /* Switch crtc and output back off if necessary */ ++ if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { ++ if (encoder->crtc == crtc) ++ encoder_funcs->dpms(encoder, dpms_mode); ++ crtc_funcs->dpms(crtc, dpms_mode); ++ } ++} ++ ++/* Returns the clock of the currently programmed mode of the given pipe. */ ++static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int pipe = intel_crtc->pipe; ++ u32 dpll = I915_READ((pipe == 0) ? 
DPLL_A : DPLL_B); ++ u32 fp; ++ intel_clock_t clock; ++ ++ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) ++ fp = I915_READ((pipe == 0) ? FPA0 : FPB0); ++ else ++ fp = I915_READ((pipe == 0) ? FPA1 : FPB1); ++ ++ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; ++ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; ++ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; ++ if (IS_I9XX(dev)) { ++ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> ++ DPLL_FPA01_P1_POST_DIV_SHIFT); ++ ++ switch (dpll & DPLL_MODE_MASK) { ++ case DPLLB_MODE_DAC_SERIAL: ++ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? ++ 5 : 10; ++ break; ++ case DPLLB_MODE_LVDS: ++ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? ++ 7 : 14; ++ break; ++ default: ++ DRM_DEBUG("Unknown DPLL mode %08x in programmed " ++ "mode\n", (int)(dpll & DPLL_MODE_MASK)); ++ return 0; ++ } ++ ++ /* XXX: Handle the 100Mhz refclk */ ++ i9xx_clock(96000, &clock); ++ } else { ++ bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); ++ ++ if (is_lvds) { ++ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> ++ DPLL_FPA01_P1_POST_DIV_SHIFT); ++ clock.p2 = 14; ++ ++ if ((dpll & PLL_REF_INPUT_MASK) == ++ PLLB_REF_INPUT_SPREADSPECTRUMIN) { ++ /* XXX: might not be 66MHz */ ++ i8xx_clock(66000, &clock); ++ } else ++ i8xx_clock(48000, &clock); ++ } else { ++ if (dpll & PLL_P1_DIVIDE_BY_TWO) ++ clock.p1 = 2; ++ else { ++ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> ++ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; ++ } ++ if (dpll & PLL_P2_DIVIDE_BY_4) ++ clock.p2 = 4; ++ else ++ clock.p2 = 2; ++ ++ i8xx_clock(48000, &clock); ++ } ++ } ++ ++ /* XXX: It would be nice to validate the clocks, but we can't reuse ++ * i830PllIsValid() because it relies on the xf86_config connector ++ * configuration being accurate, which it isn't necessarily. ++ */ ++ ++ return clock.dot; ++} ++ ++/** Returns the currently programmed mode of the given pipe. 
*/ ++struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, ++ struct drm_crtc *crtc) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int pipe = intel_crtc->pipe; ++ struct drm_display_mode *mode; ++ int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); ++ int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B); ++ int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); ++ int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B); ++ ++ mode = kzalloc(sizeof(*mode), GFP_KERNEL); ++ if (!mode) ++ return NULL; ++ ++ mode->clock = intel_crtc_clock_get(dev, crtc); ++ mode->hdisplay = (htot & 0xffff) + 1; ++ mode->htotal = ((htot & 0xffff0000) >> 16) + 1; ++ mode->hsync_start = (hsync & 0xffff) + 1; ++ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; ++ mode->vdisplay = (vtot & 0xffff) + 1; ++ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; ++ mode->vsync_start = (vsync & 0xffff) + 1; ++ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; ++ ++ drm_mode_set_name(mode); ++ drm_mode_set_crtcinfo(mode, 0); ++ ++ return mode; ++} ++ ++static void intel_crtc_destroy(struct drm_crtc *crtc) ++{ ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ ++ drm_crtc_cleanup(crtc); ++ kfree(intel_crtc); ++} ++ ++static const struct drm_crtc_helper_funcs intel_helper_funcs = { ++ .dpms = intel_crtc_dpms, ++ .mode_fixup = intel_crtc_mode_fixup, ++ .mode_set = intel_crtc_mode_set, ++ .mode_set_base = intel_pipe_set_base, ++ .prepare = intel_crtc_prepare, ++ .commit = intel_crtc_commit, ++}; ++ ++static const struct drm_crtc_funcs intel_crtc_funcs = { ++ .cursor_set = intel_crtc_cursor_set, ++ .cursor_move = intel_crtc_cursor_move, ++ .gamma_set = intel_crtc_gamma_set, ++ .set_config = drm_crtc_helper_set_config, ++ .destroy = intel_crtc_destroy, ++}; ++ ++ ++void intel_crtc_init(struct drm_device *dev, int pipe) ++{ ++ struct intel_crtc *intel_crtc; ++ int i; ++ ++ intel_crtc = 
kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); ++ if (intel_crtc == NULL) ++ return; ++ ++ drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); ++ ++ drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); ++ intel_crtc->pipe = pipe; ++ for (i = 0; i < 256; i++) { ++ intel_crtc->lut_r[i] = i; ++ intel_crtc->lut_g[i] = i; ++ intel_crtc->lut_b[i] = i; ++ } ++ ++ intel_crtc->cursor_addr = 0; ++ intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; ++ drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); ++ ++ intel_crtc->mode_set.crtc = &intel_crtc->base; ++ intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1); ++ intel_crtc->mode_set.num_connectors = 0; ++ ++ if (i915_fbpercrtc) { ++ ++ ++ ++ } ++} ++ ++struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) ++{ ++ struct drm_crtc *crtc = NULL; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ if (intel_crtc->pipe == pipe) ++ break; ++ } ++ return crtc; ++} ++ ++int intel_connector_clones(struct drm_device *dev, int type_mask) ++{ ++ int index_mask = 0; ++ struct drm_connector *connector; ++ int entry = 0; ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ struct intel_output *intel_output = to_intel_output(connector); ++ if (type_mask & (1 << intel_output->type)) ++ index_mask |= (1 << entry); ++ entry++; ++ } ++ return index_mask; ++} ++ ++ ++static void intel_setup_outputs(struct drm_device *dev) ++{ ++ struct drm_connector *connector; ++ ++ intel_crt_init(dev); ++ ++ /* Set up integrated LVDS */ ++ if (IS_MOBILE(dev) && !IS_I830(dev)) ++ intel_lvds_init(dev); ++ ++ if (IS_I9XX(dev)) { ++ intel_sdvo_init(dev, SDVOB); ++ intel_sdvo_init(dev, SDVOC); ++ } else ++ intel_dvo_init(dev); ++ ++ if (IS_I9XX(dev) && !IS_I915G(dev)) ++ intel_tv_init(dev); ++ ++ list_for_each_entry(connector, 
&dev->mode_config.connector_list, head) { ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct drm_encoder *encoder = &intel_output->enc; ++ int crtc_mask = 0, clone_mask = 0; ++ ++ /* valid crtcs */ ++ switch(intel_output->type) { ++ case INTEL_OUTPUT_DVO: ++ case INTEL_OUTPUT_SDVO: ++ crtc_mask = ((1 << 0)| ++ (1 << 1)); ++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | ++ (1 << INTEL_OUTPUT_DVO) | ++ (1 << INTEL_OUTPUT_SDVO)); ++ break; ++ case INTEL_OUTPUT_ANALOG: ++ crtc_mask = ((1 << 0)| ++ (1 << 1)); ++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | ++ (1 << INTEL_OUTPUT_DVO) | ++ (1 << INTEL_OUTPUT_SDVO)); ++ break; ++ case INTEL_OUTPUT_LVDS: ++ crtc_mask = (1 << 1); ++ clone_mask = (1 << INTEL_OUTPUT_LVDS); ++ break; ++ case INTEL_OUTPUT_TVOUT: ++ crtc_mask = ((1 << 0) | ++ (1 << 1)); ++ clone_mask = (1 << INTEL_OUTPUT_TVOUT); ++ break; ++ } ++ encoder->possible_crtcs = crtc_mask; ++ encoder->possible_clones = intel_connector_clones(dev, clone_mask); ++ } ++} ++ ++static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) ++{ ++ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); ++ struct drm_device *dev = fb->dev; ++ ++ if (fb->fbdev) ++ intelfb_remove(dev, fb); ++ ++ drm_framebuffer_cleanup(fb); ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(intel_fb->obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ kfree(intel_fb); ++} ++ ++static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, ++ struct drm_file *file_priv, ++ unsigned int *handle) ++{ ++ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); ++ struct drm_gem_object *object = intel_fb->obj; ++ ++ return drm_gem_handle_create(file_priv, object, handle); ++} ++ ++static const struct drm_framebuffer_funcs intel_fb_funcs = { ++ .destroy = intel_user_framebuffer_destroy, ++ .create_handle = intel_user_framebuffer_create_handle, ++}; ++ ++int intel_framebuffer_create(struct drm_device *dev, ++ struct drm_mode_fb_cmd 
*mode_cmd, ++ struct drm_framebuffer **fb, ++ struct drm_gem_object *obj) ++{ ++ struct intel_framebuffer *intel_fb; ++ int ret; ++ ++ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); ++ if (!intel_fb) ++ return -ENOMEM; ++ ++ ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); ++ if (ret) { ++ DRM_ERROR("framebuffer init failed %d\n", ret); ++ return ret; ++ } ++ ++ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); ++ ++ intel_fb->obj = obj; ++ ++ *fb = &intel_fb->base; ++ ++ return 0; ++} ++ ++ ++static struct drm_framebuffer * ++intel_user_framebuffer_create(struct drm_device *dev, ++ struct drm_file *filp, ++ struct drm_mode_fb_cmd *mode_cmd) ++{ ++ struct drm_gem_object *obj; ++ struct drm_framebuffer *fb; ++ int ret; ++ ++ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); ++ if (!obj) ++ return NULL; ++ ++ ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); ++ if (ret) { ++ drm_gem_object_unreference(obj); ++ return NULL; ++ } ++ ++ return fb; ++} ++ ++static const struct drm_mode_config_funcs intel_mode_funcs = { ++ .fb_create = intel_user_framebuffer_create, ++ .fb_changed = intelfb_probe, ++}; ++ ++void intel_modeset_init(struct drm_device *dev) ++{ ++ int num_pipe; ++ int i; ++ ++ drm_mode_config_init(dev); ++ ++ dev->mode_config.min_width = 0; ++ dev->mode_config.min_height = 0; ++ ++ dev->mode_config.funcs = (void *)&intel_mode_funcs; ++ ++ if (IS_I965G(dev)) { ++ dev->mode_config.max_width = 8192; ++ dev->mode_config.max_height = 8192; ++ } else { ++ dev->mode_config.max_width = 2048; ++ dev->mode_config.max_height = 2048; ++ } ++ ++ /* set memory base */ ++ if (IS_I9XX(dev)) ++ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); ++ else ++ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); ++ ++ if (IS_MOBILE(dev) || IS_I9XX(dev)) ++ num_pipe = 2; ++ else ++ num_pipe = 1; ++ DRM_DEBUG("%d display pipe%s available.\n", ++ num_pipe, num_pipe > 1 ? 
"s" : ""); ++ ++ for (i = 0; i < num_pipe; i++) { ++ intel_crtc_init(dev, i); ++ } ++ ++ intel_setup_outputs(dev); ++} ++ ++void intel_modeset_cleanup(struct drm_device *dev) ++{ ++ drm_mode_config_cleanup(dev); ++} ++ ++ ++/* current intel driver doesn't take advantage of encoders ++ always give back the encoder for the connector ++*/ ++struct drm_encoder *intel_best_encoder(struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ return &intel_output->enc; ++} +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h +new file mode 100644 +index 0000000..407edd5 +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_drv.h +@@ -0,0 +1,146 @@ ++/* ++ * Copyright (c) 2006 Dave Airlie ++ * Copyright (c) 2007-2008 Intel Corporation ++ * Jesse Barnes ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ */ ++#ifndef __INTEL_DRV_H__ ++#define __INTEL_DRV_H__ ++ ++#include ++#include ++#include ++#include "drm_crtc.h" ++ ++#include "drm_crtc_helper.h" ++/* ++ * Display related stuff ++ */ ++ ++/* store information about an Ixxx DVO */ ++/* The i830->i865 use multiple DVOs with multiple i2cs */ ++/* the i915, i945 have a single sDVO i2c bus - which is different */ ++#define MAX_OUTPUTS 6 ++/* maximum connectors per crtcs in the mode set */ ++#define INTELFB_CONN_LIMIT 4 ++ ++#define INTEL_I2C_BUS_DVO 1 ++#define INTEL_I2C_BUS_SDVO 2 ++ ++/* these are outputs from the chip - integrated only ++ external chips are via DVO or SDVO output */ ++#define INTEL_OUTPUT_UNUSED 0 ++#define INTEL_OUTPUT_ANALOG 1 ++#define INTEL_OUTPUT_DVO 2 ++#define INTEL_OUTPUT_SDVO 3 ++#define INTEL_OUTPUT_LVDS 4 ++#define INTEL_OUTPUT_TVOUT 5 ++ ++#define INTEL_DVO_CHIP_NONE 0 ++#define INTEL_DVO_CHIP_LVDS 1 ++#define INTEL_DVO_CHIP_TMDS 2 ++#define INTEL_DVO_CHIP_TVOUT 4 ++ ++struct intel_i2c_chan { ++ struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) 
*/ ++ u32 reg; /* GPIO reg */ ++ struct i2c_adapter adapter; ++ struct i2c_algo_bit_data algo; ++ u8 slave_addr; ++}; ++ ++struct intel_framebuffer { ++ struct drm_framebuffer base; ++ struct drm_gem_object *obj; ++}; ++ ++ ++struct intel_output { ++ struct drm_connector base; ++ ++ struct drm_encoder enc; ++ int type; ++ struct intel_i2c_chan *i2c_bus; /* for control functions */ ++ struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ ++ bool load_detect_temp; ++ void *dev_priv; ++}; ++ ++struct intel_crtc { ++ struct drm_crtc base; ++ int pipe; ++ int plane; ++ uint32_t cursor_addr; ++ u8 lut_r[256], lut_g[256], lut_b[256]; ++ int dpms_mode; ++ struct intel_framebuffer *fbdev_fb; ++ /* a mode_set for fbdev users on this crtc */ ++ struct drm_mode_set mode_set; ++}; ++ ++#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) ++#define to_intel_output(x) container_of(x, struct intel_output, base) ++#define enc_to_intel_output(x) container_of(x, struct intel_output, enc) ++#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) ++ ++struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, ++ const char *name); ++void intel_i2c_destroy(struct intel_i2c_chan *chan); ++int intel_ddc_get_modes(struct intel_output *intel_output); ++extern bool intel_ddc_probe(struct intel_output *intel_output); ++ ++extern void intel_crt_init(struct drm_device *dev); ++extern void intel_sdvo_init(struct drm_device *dev, int output_device); ++extern void intel_dvo_init(struct drm_device *dev); ++extern void intel_tv_init(struct drm_device *dev); ++extern void intel_lvds_init(struct drm_device *dev); ++ ++extern void intel_crtc_load_lut(struct drm_crtc *crtc); ++extern void intel_encoder_prepare (struct drm_encoder *encoder); ++extern void intel_encoder_commit (struct drm_encoder *encoder); ++ ++extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); ++ ++extern struct drm_display_mode 
*intel_crtc_mode_get(struct drm_device *dev, ++ struct drm_crtc *crtc); ++extern void intel_wait_for_vblank(struct drm_device *dev); ++extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); ++extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, ++ struct drm_display_mode *mode, ++ int *dpms_mode); ++extern void intel_release_load_detect_pipe(struct intel_output *intel_output, ++ int dpms_mode); ++ ++extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); ++extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); ++extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); ++extern int intelfb_probe(struct drm_device *dev); ++extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); ++extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); ++extern void intelfb_restore(void); ++extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, ++ u16 blue, int regno); ++ ++extern int intel_framebuffer_create(struct drm_device *dev, ++ struct drm_mode_fb_cmd *mode_cmd, ++ struct drm_framebuffer **fb, ++ struct drm_gem_object *obj); ++#endif /* __INTEL_DRV_H__ */ +diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c +new file mode 100644 +index 0000000..008bfae +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_dvo.c +@@ -0,0 +1,501 @@ ++/* ++ * Copyright 2006 Dave Airlie ++ * Copyright © 2006-2007 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following 
conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ */ ++#include ++#include "drmP.h" ++#include "drm.h" ++#include "drm_crtc.h" ++#include "intel_drv.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++#include "dvo.h" ++ ++#define SIL164_ADDR 0x38 ++#define CH7xxx_ADDR 0x76 ++#define TFP410_ADDR 0x38 ++ ++extern struct intel_dvo_dev_ops sil164_ops; ++extern struct intel_dvo_dev_ops ch7xxx_ops; ++extern struct intel_dvo_dev_ops ivch_ops; ++extern struct intel_dvo_dev_ops tfp410_ops; ++extern struct intel_dvo_dev_ops ch7017_ops; ++ ++struct intel_dvo_device intel_dvo_devices[] = { ++ { ++ .type = INTEL_DVO_CHIP_TMDS, ++ .name = "sil164", ++ .dvo_reg = DVOC, ++ .slave_addr = SIL164_ADDR, ++ .dev_ops = &sil164_ops, ++ }, ++ { ++ .type = INTEL_DVO_CHIP_TMDS, ++ .name = "ch7xxx", ++ .dvo_reg = DVOC, ++ .slave_addr = CH7xxx_ADDR, ++ .dev_ops = &ch7xxx_ops, ++ }, ++ { ++ .type = INTEL_DVO_CHIP_LVDS, ++ .name = "ivch", ++ .dvo_reg = DVOA, ++ .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ ++ .dev_ops = &ivch_ops, ++ }, ++ { ++ .type = INTEL_DVO_CHIP_TMDS, ++ .name = "tfp410", ++ .dvo_reg = DVOC, ++ .slave_addr = TFP410_ADDR, ++ .dev_ops = &tfp410_ops, ++ }, ++ { ++ .type = INTEL_DVO_CHIP_LVDS, ++ .name = "ch7017", ++ .dvo_reg = DVOC, ++ .slave_addr = 0x75, ++ .gpio = GPIOE, ++ .dev_ops = 
&ch7017_ops, ++ } ++}; ++ ++static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_i915_private *dev_priv = encoder->dev->dev_private; ++ struct intel_output *intel_output = enc_to_intel_output(encoder); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ u32 dvo_reg = dvo->dvo_reg; ++ u32 temp = I915_READ(dvo_reg); ++ ++ if (mode == DRM_MODE_DPMS_ON) { ++ I915_WRITE(dvo_reg, temp | DVO_ENABLE); ++ I915_READ(dvo_reg); ++ dvo->dev_ops->dpms(dvo, mode); ++ } else { ++ dvo->dev_ops->dpms(dvo, mode); ++ I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); ++ I915_READ(dvo_reg); ++ } ++} ++ ++static void intel_dvo_save(struct drm_connector *connector) ++{ ++ struct drm_i915_private *dev_priv = connector->dev->dev_private; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ ++ /* Each output should probably just save the registers it touches, ++ * but for now, use more overkill. ++ */ ++ dev_priv->saveDVOA = I915_READ(DVOA); ++ dev_priv->saveDVOB = I915_READ(DVOB); ++ dev_priv->saveDVOC = I915_READ(DVOC); ++ ++ dvo->dev_ops->save(dvo); ++} ++ ++static void intel_dvo_restore(struct drm_connector *connector) ++{ ++ struct drm_i915_private *dev_priv = connector->dev->dev_private; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ ++ dvo->dev_ops->restore(dvo); ++ ++ I915_WRITE(DVOA, dev_priv->saveDVOA); ++ I915_WRITE(DVOB, dev_priv->saveDVOB); ++ I915_WRITE(DVOC, dev_priv->saveDVOC); ++} ++ ++static int intel_dvo_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ ++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ return MODE_NO_DBLESCAN; ++ ++ /* XXX: Validate clock range */ ++ ++ if (dvo->panel_fixed_mode) { ++ if (mode->hdisplay > 
dvo->panel_fixed_mode->hdisplay) ++ return MODE_PANEL; ++ if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay) ++ return MODE_PANEL; ++ } ++ ++ return dvo->dev_ops->mode_valid(dvo, mode); ++} ++ ++static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct intel_output *intel_output = enc_to_intel_output(encoder); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ ++ /* If we have timings from the BIOS for the panel, put them in ++ * to the adjusted mode. The CRTC will be set up for this mode, ++ * with the panel scaling set up to source from the H/VDisplay ++ * of the original mode. ++ */ ++ if (dvo->panel_fixed_mode != NULL) { ++#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x ++ C(hdisplay); ++ C(hsync_start); ++ C(hsync_end); ++ C(htotal); ++ C(vdisplay); ++ C(vsync_start); ++ C(vsync_end); ++ C(vtotal); ++ C(clock); ++ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); ++#undef C ++ } ++ ++ if (dvo->dev_ops->mode_fixup) ++ return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode); ++ ++ return true; ++} ++ ++static void intel_dvo_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); ++ struct intel_output *intel_output = enc_to_intel_output(encoder); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ int pipe = intel_crtc->pipe; ++ u32 dvo_val; ++ u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; ++ int dpll_reg = (pipe == 0) ? 
DPLL_A : DPLL_B; ++ ++ switch (dvo_reg) { ++ case DVOA: ++ default: ++ dvo_srcdim_reg = DVOA_SRCDIM; ++ break; ++ case DVOB: ++ dvo_srcdim_reg = DVOB_SRCDIM; ++ break; ++ case DVOC: ++ dvo_srcdim_reg = DVOC_SRCDIM; ++ break; ++ } ++ ++ dvo->dev_ops->mode_set(dvo, mode, adjusted_mode); ++ ++ /* Save the data order, since I don't know what it should be set to. */ ++ dvo_val = I915_READ(dvo_reg) & ++ (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG); ++ dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE | ++ DVO_BLANK_ACTIVE_HIGH; ++ ++ if (pipe == 1) ++ dvo_val |= DVO_PIPE_B_SELECT; ++ dvo_val |= DVO_PIPE_STALL; ++ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ++ dvo_val |= DVO_HSYNC_ACTIVE_HIGH; ++ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ++ dvo_val |= DVO_VSYNC_ACTIVE_HIGH; ++ ++ I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED); ++ ++ /*I915_WRITE(DVOB_SRCDIM, ++ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | ++ (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ ++ I915_WRITE(dvo_srcdim_reg, ++ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | ++ (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); ++ /*I915_WRITE(DVOB, dvo_val);*/ ++ I915_WRITE(dvo_reg, dvo_val); ++} ++ ++/** ++ * Detect the output connection on our DVO device. ++ * ++ * Unimplemented. 
++ */ ++static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ ++ return dvo->dev_ops->detect(dvo); ++} ++ ++static int intel_dvo_get_modes(struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ ++ /* We should probably have an i2c driver get_modes function for those ++ * devices which will have a fixed set of modes determined by the chip ++ * (TV-out, for example), but for now with just TMDS and LVDS, ++ * that's not the case. ++ */ ++ intel_ddc_get_modes(intel_output); ++ if (!list_empty(&connector->probed_modes)) ++ return 1; ++ ++ ++ if (dvo->panel_fixed_mode != NULL) { ++ struct drm_display_mode *mode; ++ mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode); ++ if (mode) { ++ drm_mode_probed_add(connector, mode); ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++static void intel_dvo_destroy (struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ ++ if (dvo) { ++ if (dvo->dev_ops->destroy) ++ dvo->dev_ops->destroy(dvo); ++ if (dvo->panel_fixed_mode) ++ kfree(dvo->panel_fixed_mode); ++ /* no need, in i830_dvoices[] now */ ++ //kfree(dvo); ++ } ++ if (intel_output->i2c_bus) ++ intel_i2c_destroy(intel_output->i2c_bus); ++ if (intel_output->ddc_bus) ++ intel_i2c_destroy(intel_output->ddc_bus); ++ drm_sysfs_connector_remove(connector); ++ drm_connector_cleanup(connector); ++ kfree(intel_output); ++} ++ ++#ifdef RANDR_GET_CRTC_INTERFACE ++static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_output *intel_output = to_intel_output(connector); ++ 
struct intel_dvo_device *dvo = intel_output->dev_priv; ++ int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); ++ ++ return intel_pipe_to_crtc(pScrn, pipe); ++} ++#endif ++ ++static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { ++ .dpms = intel_dvo_dpms, ++ .mode_fixup = intel_dvo_mode_fixup, ++ .prepare = intel_encoder_prepare, ++ .mode_set = intel_dvo_mode_set, ++ .commit = intel_encoder_commit, ++}; ++ ++static const struct drm_connector_funcs intel_dvo_connector_funcs = { ++ .save = intel_dvo_save, ++ .restore = intel_dvo_restore, ++ .detect = intel_dvo_detect, ++ .destroy = intel_dvo_destroy, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++}; ++ ++static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { ++ .mode_valid = intel_dvo_mode_valid, ++ .get_modes = intel_dvo_get_modes, ++ .best_encoder = intel_best_encoder, ++}; ++ ++void intel_dvo_enc_destroy(struct drm_encoder *encoder) ++{ ++ drm_encoder_cleanup(encoder); ++} ++ ++static const struct drm_encoder_funcs intel_dvo_enc_funcs = { ++ .destroy = intel_dvo_enc_destroy, ++}; ++ ++ ++/** ++ * Attempts to get a fixed panel timing for LVDS (currently only the i830). ++ * ++ * Other chips with DVO LVDS will need to extend this to deal with the LVDS ++ * chip being on DVOB/C and having multiple pipes. ++ */ ++static struct drm_display_mode * ++intel_dvo_get_current_mode (struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_dvo_device *dvo = intel_output->dev_priv; ++ uint32_t dvo_reg = dvo->dvo_reg; ++ uint32_t dvo_val = I915_READ(dvo_reg); ++ struct drm_display_mode *mode = NULL; ++ ++ /* If the DVO port is active, that'll be the LVDS, so we can pull out ++ * its timings to get how the BIOS set up the panel. 
++ */ ++ if (dvo_val & DVO_ENABLE) { ++ struct drm_crtc *crtc; ++ int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0; ++ ++ crtc = intel_get_crtc_from_pipe(dev, pipe); ++ if (crtc) { ++ mode = intel_crtc_mode_get(dev, crtc); ++ ++ if (mode) { ++ mode->type |= DRM_MODE_TYPE_PREFERRED; ++ if (dvo_val & DVO_HSYNC_ACTIVE_HIGH) ++ mode->flags |= DRM_MODE_FLAG_PHSYNC; ++ if (dvo_val & DVO_VSYNC_ACTIVE_HIGH) ++ mode->flags |= DRM_MODE_FLAG_PVSYNC; ++ } ++ } ++ } ++ return mode; ++} ++ ++void intel_dvo_init(struct drm_device *dev) ++{ ++ struct intel_output *intel_output; ++ struct intel_dvo_device *dvo; ++ struct intel_i2c_chan *i2cbus = NULL; ++ int ret = 0; ++ int i; ++ int gpio_inited = 0; ++ int encoder_type = DRM_MODE_ENCODER_NONE; ++ intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); ++ if (!intel_output) ++ return; ++ ++ /* Set up the DDC bus */ ++ intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); ++ if (!intel_output->ddc_bus) ++ goto free_intel; ++ ++ /* Now, try to find a controller */ ++ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { ++ struct drm_connector *connector = &intel_output->base; ++ int gpio; ++ ++ dvo = &intel_dvo_devices[i]; ++ ++ /* Allow the I2C driver info to specify the GPIO to be used in ++ * special cases, but otherwise default to what's defined ++ * in the spec. ++ */ ++ if (dvo->gpio != 0) ++ gpio = dvo->gpio; ++ else if (dvo->type == INTEL_DVO_CHIP_LVDS) ++ gpio = GPIOB; ++ else ++ gpio = GPIOE; ++ ++ /* Set up the I2C bus necessary for the chip we're probing. ++ * It appears that everything is on GPIOE except for panels ++ * on i830 laptops, which are on GPIOB (DVOA). ++ */ ++ if (gpio_inited != gpio) { ++ if (i2cbus != NULL) ++ intel_i2c_destroy(i2cbus); ++ if (!(i2cbus = intel_i2c_create(dev, gpio, ++ gpio == GPIOB ? 
"DVOI2C_B" : "DVOI2C_E"))) { ++ continue; ++ } ++ gpio_inited = gpio; ++ } ++ ++ if (dvo->dev_ops!= NULL) ++ ret = dvo->dev_ops->init(dvo, i2cbus); ++ else ++ ret = false; ++ ++ if (!ret) ++ continue; ++ ++ intel_output->type = INTEL_OUTPUT_DVO; ++ switch (dvo->type) { ++ case INTEL_DVO_CHIP_TMDS: ++ drm_connector_init(dev, connector, ++ &intel_dvo_connector_funcs, ++ DRM_MODE_CONNECTOR_DVII); ++ encoder_type = DRM_MODE_ENCODER_TMDS; ++ break; ++ case INTEL_DVO_CHIP_LVDS: ++ drm_connector_init(dev, connector, ++ &intel_dvo_connector_funcs, ++ DRM_MODE_CONNECTOR_LVDS); ++ encoder_type = DRM_MODE_ENCODER_LVDS; ++ break; ++ } ++ ++ drm_connector_helper_add(connector, ++ &intel_dvo_connector_helper_funcs); ++ connector->display_info.subpixel_order = SubPixelHorizontalRGB; ++ connector->interlace_allowed = false; ++ connector->doublescan_allowed = false; ++ ++ intel_output->dev_priv = dvo; ++ intel_output->i2c_bus = i2cbus; ++ ++ drm_encoder_init(dev, &intel_output->enc, ++ &intel_dvo_enc_funcs, encoder_type); ++ drm_encoder_helper_add(&intel_output->enc, ++ &intel_dvo_helper_funcs); ++ ++ drm_mode_connector_attach_encoder(&intel_output->base, ++ &intel_output->enc); ++ if (dvo->type == INTEL_DVO_CHIP_LVDS) { ++ /* For our LVDS chipsets, we should hopefully be able ++ * to dig the fixed panel mode out of the BIOS data. ++ * However, it's in a different format from the BIOS ++ * data on chipsets with integrated LVDS (stored in AIM ++ * headers, likely), so for now, just get the current ++ * mode being output through DVO. ++ */ ++ dvo->panel_fixed_mode = ++ intel_dvo_get_current_mode(connector); ++ dvo->panel_wants_dither = true; ++ } ++ ++ drm_sysfs_connector_add(connector); ++ return; ++ } ++ ++ intel_i2c_destroy(intel_output->ddc_bus); ++ /* Didn't find a chip, so tear down. 
*/ ++ if (i2cbus != NULL) ++ intel_i2c_destroy(i2cbus); ++free_intel: ++ kfree(intel_output); ++} +diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c +new file mode 100644 +index 0000000..bbf3e7f +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_fb.c +@@ -0,0 +1,926 @@ ++/* ++ * Copyright © 2007 David Airlie ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * David Airlie ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_crtc.h" ++#include "intel_drv.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++struct intelfb_par { ++ struct drm_device *dev; ++ struct drm_display_mode *our_mode; ++ struct intel_framebuffer *intel_fb; ++ int crtc_count; ++ /* crtc currently bound to this */ ++ uint32_t crtc_ids[2]; ++}; ++ ++static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green, ++ unsigned blue, unsigned transp, ++ struct fb_info *info) ++{ ++ struct intelfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct drm_crtc *crtc; ++ int i; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ struct drm_mode_set *modeset = &intel_crtc->mode_set; ++ struct drm_framebuffer *fb = modeset->fb; ++ ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ if (i == par->crtc_count) ++ continue; ++ ++ ++ if (regno > 255) ++ return 1; ++ ++ if (fb->depth == 8) { ++ intel_crtc_fb_gamma_set(crtc, red, green, blue, regno); ++ return 0; ++ } ++ ++ if (regno < 16) { ++ switch (fb->depth) { ++ case 15: ++ fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | ++ ((green & 0xf800) >> 6) | ++ ((blue & 0xf800) >> 11); ++ break; ++ case 16: ++ fb->pseudo_palette[regno] = (red & 0xf800) | ++ ((green & 0xfc00) >> 5) | ++ ((blue & 0xf800) >> 11); ++ break; ++ case 24: ++ case 32: ++ fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | ++ (green & 0xff00) | ++ ((blue & 0xff00) >> 8); ++ break; ++ } ++ } ++ } ++ return 0; ++} ++ ++static int intelfb_check_var(struct fb_var_screeninfo *var, ++ struct fb_info *info) ++{ ++ struct intelfb_par *par = info->par; ++ struct intel_framebuffer *intel_fb = par->intel_fb; ++ struct drm_framebuffer *fb = 
&intel_fb->base; ++ int depth; ++ ++ if (var->pixclock == -1 || !var->pixclock) ++ return -EINVAL; ++ ++ /* Need to resize the fb object !!! */ ++ if (var->xres > fb->width || var->yres > fb->height) { ++ DRM_ERROR("Requested width/height is greater than current fb object %dx%d > %dx%d\n",var->xres,var->yres,fb->width,fb->height); ++ DRM_ERROR("Need resizing code.\n"); ++ return -EINVAL; ++ } ++ ++ switch (var->bits_per_pixel) { ++ case 16: ++ depth = (var->green.length == 6) ? 16 : 15; ++ break; ++ case 32: ++ depth = (var->transp.length > 0) ? 32 : 24; ++ break; ++ default: ++ depth = var->bits_per_pixel; ++ break; ++ } ++ ++ switch (depth) { ++ case 8: ++ var->red.offset = 0; ++ var->green.offset = 0; ++ var->blue.offset = 0; ++ var->red.length = 8; ++ var->green.length = 8; ++ var->blue.length = 8; ++ var->transp.length = 0; ++ var->transp.offset = 0; ++ break; ++ case 15: ++ var->red.offset = 10; ++ var->green.offset = 5; ++ var->blue.offset = 0; ++ var->red.length = 5; ++ var->green.length = 5; ++ var->blue.length = 5; ++ var->transp.length = 1; ++ var->transp.offset = 15; ++ break; ++ case 16: ++ var->red.offset = 11; ++ var->green.offset = 5; ++ var->blue.offset = 0; ++ var->red.length = 5; ++ var->green.length = 6; ++ var->blue.length = 5; ++ var->transp.length = 0; ++ var->transp.offset = 0; ++ break; ++ case 24: ++ var->red.offset = 16; ++ var->green.offset = 8; ++ var->blue.offset = 0; ++ var->red.length = 8; ++ var->green.length = 8; ++ var->blue.length = 8; ++ var->transp.length = 0; ++ var->transp.offset = 0; ++ break; ++ case 32: ++ var->red.offset = 16; ++ var->green.offset = 8; ++ var->blue.offset = 0; ++ var->red.length = 8; ++ var->green.length = 8; ++ var->blue.length = 8; ++ var->transp.length = 8; ++ var->transp.offset = 24; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* this will let fbcon do the mode init */ ++/* FIXME: take mode config lock? 
*/ ++static int intelfb_set_par(struct fb_info *info) ++{ ++ struct intelfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct fb_var_screeninfo *var = &info->var; ++ int i; ++ ++ DRM_DEBUG("%d %d\n", var->xres, var->pixclock); ++ ++ if (var->pixclock != -1) { ++ ++ DRM_ERROR("PIXEL CLCOK SET\n"); ++ return -EINVAL; ++ } else { ++ struct drm_crtc *crtc; ++ int ret; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ if (i == par->crtc_count) ++ continue; ++ ++ if (crtc->fb == intel_crtc->mode_set.fb) { ++ mutex_lock(&dev->mode_config.mutex); ++ ret = crtc->funcs->set_config(&intel_crtc->mode_set); ++ mutex_unlock(&dev->mode_config.mutex); ++ if (ret) ++ return ret; ++ } ++ } ++ return 0; ++ } ++} ++ ++static int intelfb_pan_display(struct fb_var_screeninfo *var, ++ struct fb_info *info) ++{ ++ struct intelfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct drm_mode_set *modeset; ++ struct drm_crtc *crtc; ++ struct intel_crtc *intel_crtc; ++ int ret = 0; ++ int i; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ if (i == par->crtc_count) ++ continue; ++ ++ intel_crtc = to_intel_crtc(crtc); ++ modeset = &intel_crtc->mode_set; ++ ++ modeset->x = var->xoffset; ++ modeset->y = var->yoffset; ++ ++ if (modeset->num_connectors) { ++ mutex_lock(&dev->mode_config.mutex); ++ ret = crtc->funcs->set_config(modeset); ++ mutex_unlock(&dev->mode_config.mutex); ++ if (!ret) { ++ info->var.xoffset = var->xoffset; ++ info->var.yoffset = var->yoffset; ++ } ++ } ++ } ++ ++ return ret; ++} ++ ++static void intelfb_on(struct fb_info *info) ++{ ++ struct intelfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct drm_crtc *crtc; ++ struct 
drm_encoder *encoder; ++ int i; ++ ++ /* ++ * For each CRTC in this fb, find all associated encoders ++ * and turn them off, then turn off the CRTC. ++ */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; ++ ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); ++ ++ /* Found a CRTC on this fb, now find encoders */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ if (encoder->crtc == crtc) { ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ encoder_funcs = encoder->helper_private; ++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); ++ } ++ } ++ } ++} ++ ++static void intelfb_off(struct fb_info *info, int dpms_mode) ++{ ++ struct intelfb_par *par = info->par; ++ struct drm_device *dev = par->dev; ++ struct drm_crtc *crtc; ++ struct drm_encoder *encoder; ++ int i; ++ ++ /* ++ * For each CRTC in this fb, find all associated encoders ++ * and turn them off, then turn off the CRTC. 
++ */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; ++ ++ for (i = 0; i < par->crtc_count; i++) ++ if (crtc->base.id == par->crtc_ids[i]) ++ break; ++ ++ /* Found a CRTC on this fb, now find encoders */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ if (encoder->crtc == crtc) { ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ encoder_funcs = encoder->helper_private; ++ encoder_funcs->dpms(encoder, dpms_mode); ++ } ++ } ++ if (dpms_mode == DRM_MODE_DPMS_OFF) ++ crtc_funcs->dpms(crtc, dpms_mode); ++ } ++} ++ ++int intelfb_blank(int blank, struct fb_info *info) ++{ ++ switch (blank) { ++ case FB_BLANK_UNBLANK: ++ intelfb_on(info); ++ break; ++ case FB_BLANK_NORMAL: ++ intelfb_off(info, DRM_MODE_DPMS_STANDBY); ++ break; ++ case FB_BLANK_HSYNC_SUSPEND: ++ intelfb_off(info, DRM_MODE_DPMS_STANDBY); ++ break; ++ case FB_BLANK_VSYNC_SUSPEND: ++ intelfb_off(info, DRM_MODE_DPMS_SUSPEND); ++ break; ++ case FB_BLANK_POWERDOWN: ++ intelfb_off(info, DRM_MODE_DPMS_OFF); ++ break; ++ } ++ return 0; ++} ++ ++static struct fb_ops intelfb_ops = { ++ .owner = THIS_MODULE, ++ .fb_check_var = intelfb_check_var, ++ .fb_set_par = intelfb_set_par, ++ .fb_setcolreg = intelfb_setcolreg, ++ .fb_fillrect = cfb_fillrect, ++ .fb_copyarea = cfb_copyarea, ++ .fb_imageblit = cfb_imageblit, ++ .fb_pan_display = intelfb_pan_display, ++ .fb_blank = intelfb_blank, ++}; ++ ++/** ++ * Curretly it is assumed that the old framebuffer is reused. ++ * ++ * LOCKING ++ * caller should hold the mode config lock. 
++ * ++ */ ++int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc) ++{ ++ struct fb_info *info; ++ struct drm_framebuffer *fb; ++ struct drm_display_mode *mode = crtc->desired_mode; ++ ++ fb = crtc->fb; ++ if (!fb) ++ return 1; ++ ++ info = fb->fbdev; ++ if (!info) ++ return 1; ++ ++ if (!mode) ++ return 1; ++ ++ info->var.xres = mode->hdisplay; ++ info->var.right_margin = mode->hsync_start - mode->hdisplay; ++ info->var.hsync_len = mode->hsync_end - mode->hsync_start; ++ info->var.left_margin = mode->htotal - mode->hsync_end; ++ info->var.yres = mode->vdisplay; ++ info->var.lower_margin = mode->vsync_start - mode->vdisplay; ++ info->var.vsync_len = mode->vsync_end - mode->vsync_start; ++ info->var.upper_margin = mode->vtotal - mode->vsync_end; ++ info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; ++ /* avoid overflow */ ++ info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; ++ ++ return 0; ++} ++EXPORT_SYMBOL(intelfb_resize); ++ ++static struct drm_mode_set kernelfb_mode; ++ ++int intelfb_panic(struct notifier_block *n, unsigned long ununsed, ++ void *panic_str) ++{ ++ DRM_ERROR("panic occurred, switching back to text console\n"); ++ ++ intelfb_restore(); ++ return 0; ++} ++EXPORT_SYMBOL(intelfb_panic); ++ ++static struct notifier_block paniced = { ++ .notifier_call = intelfb_panic, ++}; ++ ++int intelfb_create(struct drm_device *dev, uint32_t fb_width, ++ uint32_t fb_height, uint32_t surface_width, ++ uint32_t surface_height, ++ struct intel_framebuffer **intel_fb_p) ++{ ++ struct fb_info *info; ++ struct intelfb_par *par; ++ struct drm_framebuffer *fb; ++ struct intel_framebuffer *intel_fb; ++ struct drm_mode_fb_cmd mode_cmd; ++ struct drm_gem_object *fbo = NULL; ++ struct drm_i915_gem_object *obj_priv; ++ struct device *device = &dev->pdev->dev; ++ int size, ret, mmio_bar = IS_I9XX(dev) ? 
0 : 1; ++ ++ mode_cmd.width = surface_width; ++ mode_cmd.height = surface_height; ++ ++ mode_cmd.bpp = 32; ++ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); ++ mode_cmd.depth = 24; ++ ++ size = mode_cmd.pitch * mode_cmd.height; ++ size = ALIGN(size, PAGE_SIZE); ++ fbo = drm_gem_object_alloc(dev, size); ++ if (!fbo) { ++ printk(KERN_ERR "failed to allocate framebuffer\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ obj_priv = fbo->driver_private; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ ret = i915_gem_object_pin(fbo, PAGE_SIZE); ++ if (ret) { ++ DRM_ERROR("failed to pin fb: %d\n", ret); ++ goto out_unref; ++ } ++ ++ /* Flush everything out, we'll be doing GTT only from now on */ ++ i915_gem_object_set_to_gtt_domain(fbo, 1); ++ ++ ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); ++ if (ret) { ++ DRM_ERROR("failed to allocate fb.\n"); ++ goto out_unref; ++ } ++ ++ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); ++ ++ intel_fb = to_intel_framebuffer(fb); ++ *intel_fb_p = intel_fb; ++ ++ info = framebuffer_alloc(sizeof(struct intelfb_par), device); ++ if (!info) { ++ ret = -ENOMEM; ++ goto out_unref; ++ } ++ ++ par = info->par; ++ ++ strcpy(info->fix.id, "inteldrmfb"); ++ info->fix.type = FB_TYPE_PACKED_PIXELS; ++ info->fix.visual = FB_VISUAL_TRUECOLOR; ++ info->fix.type_aux = 0; ++ info->fix.xpanstep = 1; /* doing it in hw */ ++ info->fix.ypanstep = 1; /* doing it in hw */ ++ info->fix.ywrapstep = 0; ++ info->fix.accel = FB_ACCEL_I830; ++ info->fix.type_aux = 0; ++ ++ info->flags = FBINFO_DEFAULT; ++ ++ info->fbops = &intelfb_ops; ++ ++ info->fix.line_length = fb->pitch; ++ info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; ++ info->fix.smem_len = size; ++ ++ info->flags = FBINFO_DEFAULT; ++ ++ info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, ++ size); ++ if (!info->screen_base) { ++ ret = -ENOSPC; ++ goto out_unref; ++ } ++ info->screen_size = size; ++ ++// memset(info->screen_base, 
0, size); ++ ++ info->pseudo_palette = fb->pseudo_palette; ++ info->var.xres_virtual = fb->width; ++ info->var.yres_virtual = fb->height; ++ info->var.bits_per_pixel = fb->bits_per_pixel; ++ info->var.xoffset = 0; ++ info->var.yoffset = 0; ++ info->var.activate = FB_ACTIVATE_NOW; ++ info->var.height = -1; ++ info->var.width = -1; ++ ++ info->var.xres = fb_width; ++ info->var.yres = fb_height; ++ ++ /* FIXME: we really shouldn't expose mmio space at all */ ++ info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); ++ info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar); ++ ++ info->pixmap.size = 64*1024; ++ info->pixmap.buf_align = 8; ++ info->pixmap.access_align = 32; ++ info->pixmap.flags = FB_PIXMAP_SYSTEM; ++ info->pixmap.scan_align = 1; ++ ++ switch(fb->depth) { ++ case 8: ++ info->var.red.offset = 0; ++ info->var.green.offset = 0; ++ info->var.blue.offset = 0; ++ info->var.red.length = 8; /* 8bit DAC */ ++ info->var.green.length = 8; ++ info->var.blue.length = 8; ++ info->var.transp.offset = 0; ++ info->var.transp.length = 0; ++ break; ++ case 15: ++ info->var.red.offset = 10; ++ info->var.green.offset = 5; ++ info->var.blue.offset = 0; ++ info->var.red.length = 5; ++ info->var.green.length = 5; ++ info->var.blue.length = 5; ++ info->var.transp.offset = 15; ++ info->var.transp.length = 1; ++ break; ++ case 16: ++ info->var.red.offset = 11; ++ info->var.green.offset = 5; ++ info->var.blue.offset = 0; ++ info->var.red.length = 5; ++ info->var.green.length = 6; ++ info->var.blue.length = 5; ++ info->var.transp.offset = 0; ++ break; ++ case 24: ++ info->var.red.offset = 16; ++ info->var.green.offset = 8; ++ info->var.blue.offset = 0; ++ info->var.red.length = 8; ++ info->var.green.length = 8; ++ info->var.blue.length = 8; ++ info->var.transp.offset = 0; ++ info->var.transp.length = 0; ++ break; ++ case 32: ++ info->var.red.offset = 16; ++ info->var.green.offset = 8; ++ info->var.blue.offset = 0; ++ info->var.red.length = 8; ++ info->var.green.length 
= 8; ++ info->var.blue.length = 8; ++ info->var.transp.offset = 24; ++ info->var.transp.length = 8; ++ break; ++ default: ++ break; ++ } ++ ++ fb->fbdev = info; ++ ++ par->intel_fb = intel_fb; ++ par->dev = dev; ++ ++ /* To allow resizeing without swapping buffers */ ++ printk("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, ++ intel_fb->base.height, obj_priv->gtt_offset, fbo); ++ ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++ ++out_unref: ++ drm_gem_object_unreference(fbo); ++ mutex_unlock(&dev->struct_mutex); ++out: ++ return ret; ++} ++ ++static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc) ++{ ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ struct intel_framebuffer *intel_fb; ++ struct drm_framebuffer *fb; ++ struct drm_connector *connector; ++ struct fb_info *info; ++ struct intelfb_par *par; ++ struct drm_mode_set *modeset; ++ unsigned int width, height; ++ int new_fb = 0; ++ int ret, i, conn_count; ++ ++ if (!drm_helper_crtc_in_use(crtc)) ++ return 0; ++ ++ if (!crtc->desired_mode) ++ return 0; ++ ++ width = crtc->desired_mode->hdisplay; ++ height = crtc->desired_mode->vdisplay; ++ ++ /* is there an fb bound to this crtc already */ ++ if (!intel_crtc->mode_set.fb) { ++ ret = intelfb_create(dev, width, height, width, height, &intel_fb); ++ if (ret) ++ return -EINVAL; ++ new_fb = 1; ++ } else { ++ fb = intel_crtc->mode_set.fb; ++ intel_fb = to_intel_framebuffer(fb); ++ if ((intel_fb->base.width < width) || (intel_fb->base.height < height)) ++ return -EINVAL; ++ } ++ ++ info = intel_fb->base.fbdev; ++ par = info->par; ++ ++ modeset = &intel_crtc->mode_set; ++ modeset->fb = &intel_fb->base; ++ conn_count = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ if (connector->encoder) ++ if (connector->encoder->crtc == modeset->crtc) { ++ modeset->connectors[conn_count] = connector; ++ conn_count++; ++ if (conn_count > INTELFB_CONN_LIMIT) ++ BUG(); ++ } ++ } ++ ++ for (i = 
conn_count; i < INTELFB_CONN_LIMIT; i++) ++ modeset->connectors[i] = NULL; ++ ++ par->crtc_ids[0] = crtc->base.id; ++ ++ modeset->num_connectors = conn_count; ++ if (modeset->mode != modeset->crtc->desired_mode) ++ modeset->mode = modeset->crtc->desired_mode; ++ ++ par->crtc_count = 1; ++ ++ if (new_fb) { ++ info->var.pixclock = -1; ++ if (register_framebuffer(info) < 0) ++ return -EINVAL; ++ } else ++ intelfb_set_par(info); ++ ++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, ++ info->fix.id); ++ ++ /* Switch back to kernel console on panic */ ++ kernelfb_mode = *modeset; ++ atomic_notifier_chain_register(&panic_notifier_list, &paniced); ++ printk(KERN_INFO "registered panic notifier\n"); ++ ++ return 0; ++} ++ ++static int intelfb_multi_fb_probe(struct drm_device *dev) ++{ ++ ++ struct drm_crtc *crtc; ++ int ret = 0; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ ret = intelfb_multi_fb_probe_crtc(dev, crtc); ++ if (ret) ++ return ret; ++ } ++ return ret; ++} ++ ++static int intelfb_single_fb_probe(struct drm_device *dev) ++{ ++ struct drm_crtc *crtc; ++ struct drm_connector *connector; ++ unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; ++ unsigned int surface_width = 0, surface_height = 0; ++ int new_fb = 0; ++ int crtc_count = 0; ++ int ret, i, conn_count = 0; ++ struct intel_framebuffer *intel_fb; ++ struct fb_info *info; ++ struct intelfb_par *par; ++ struct drm_mode_set *modeset = NULL; ++ ++ DRM_DEBUG("\n"); ++ ++ /* Get a count of crtcs now in use and new min/maxes width/heights */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ if (!drm_helper_crtc_in_use(crtc)) ++ continue; ++ ++ crtc_count++; ++ if (!crtc->desired_mode) ++ continue; ++ ++ /* Smallest mode determines console size... 
*/ ++ if (crtc->desired_mode->hdisplay < fb_width) ++ fb_width = crtc->desired_mode->hdisplay; ++ ++ if (crtc->desired_mode->vdisplay < fb_height) ++ fb_height = crtc->desired_mode->vdisplay; ++ ++ /* ... but largest for memory allocation dimensions */ ++ if (crtc->desired_mode->hdisplay > surface_width) ++ surface_width = crtc->desired_mode->hdisplay; ++ ++ if (crtc->desired_mode->vdisplay > surface_height) ++ surface_height = crtc->desired_mode->vdisplay; ++ } ++ ++ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { ++ /* hmm everyone went away - assume VGA cable just fell out ++ and will come back later. */ ++ DRM_DEBUG("no CRTCs available?\n"); ++ return 0; ++ } ++ ++//fail ++ /* Find the fb for our new config */ ++ if (list_empty(&dev->mode_config.fb_kernel_list)) { ++ DRM_DEBUG("creating new fb (console size %dx%d, " ++ "buffer size %dx%d)\n", fb_width, fb_height, ++ surface_width, surface_height); ++ ret = intelfb_create(dev, fb_width, fb_height, surface_width, ++ surface_height, &intel_fb); ++ if (ret) ++ return -EINVAL; ++ new_fb = 1; ++ } else { ++ struct drm_framebuffer *fb; ++ ++ fb = list_first_entry(&dev->mode_config.fb_kernel_list, ++ struct drm_framebuffer, filp_head); ++ intel_fb = to_intel_framebuffer(fb); ++ ++ /* if someone hotplugs something bigger than we have already ++ * allocated, we are pwned. As really we can't resize an ++ * fbdev that is in the wild currently due to fbdev not really ++ * being designed for the lower layers moving stuff around ++ * under it. ++ * - so in the grand style of things - punt. ++ */ ++ if ((fb->width < surface_width) || ++ (fb->height < surface_height)) { ++ DRM_ERROR("fb not large enough for console\n"); ++ return -EINVAL; ++ } ++ } ++// fail ++ ++ info = intel_fb->base.fbdev; ++ par = info->par; ++ ++ crtc_count = 0; ++ /* ++ * For each CRTC, set up the connector list for the CRTC's mode ++ * set configuration. 
++ */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ ++ modeset = &intel_crtc->mode_set; ++ modeset->fb = &intel_fb->base; ++ conn_count = 0; ++ list_for_each_entry(connector, &dev->mode_config.connector_list, ++ head) { ++ if (!connector->encoder) ++ continue; ++ ++ if(connector->encoder->crtc == modeset->crtc) { ++ modeset->connectors[conn_count++] = connector; ++ if (conn_count > INTELFB_CONN_LIMIT) ++ BUG(); ++ } ++ } ++ ++ /* Zero out remaining connector pointers */ ++ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++) ++ modeset->connectors[i] = NULL; ++ ++ par->crtc_ids[crtc_count++] = crtc->base.id; ++ ++ modeset->num_connectors = conn_count; ++ if (modeset->mode != modeset->crtc->desired_mode) ++ modeset->mode = modeset->crtc->desired_mode; ++ } ++ par->crtc_count = crtc_count; ++ ++ if (new_fb) { ++ info->var.pixclock = -1; ++ if (register_framebuffer(info) < 0) ++ return -EINVAL; ++ } else ++ intelfb_set_par(info); ++ ++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, ++ info->fix.id); ++ ++ /* Switch back to kernel console on panic */ ++ kernelfb_mode = *modeset; ++ atomic_notifier_chain_register(&panic_notifier_list, &paniced); ++ printk(KERN_INFO "registered panic notifier\n"); ++ ++ return 0; ++} ++ ++/** ++ * intelfb_restore - restore the framebuffer console (kernel) config ++ * ++ * Restore's the kernel's fbcon mode, used for lastclose & panic paths. 
++ */ ++void intelfb_restore(void) ++{ ++ drm_crtc_helper_set_config(&kernelfb_mode); ++} ++ ++static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) ++{ ++ intelfb_restore(); ++} ++ ++static struct sysrq_key_op sysrq_intelfb_restore_op = { ++ .handler = intelfb_sysrq, ++ .help_msg = "force fb", ++ .action_msg = "force restore of fb console", ++}; ++ ++int intelfb_probe(struct drm_device *dev) ++{ ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ /* something has changed in the lower levels of hell - deal with it ++ here */ ++ ++ /* two modes : a) 1 fb to rule all crtcs. ++ b) one fb per crtc. ++ two actions 1) new connected device ++ 2) device removed. ++ case a/1 : if the fb surface isn't big enough - resize the surface fb. ++ if the fb size isn't big enough - resize fb into surface. ++ if everything big enough configure the new crtc/etc. ++ case a/2 : undo the configuration ++ possibly resize down the fb to fit the new configuration. ++ case b/1 : see if it is on a new crtc - setup a new fb and add it. ++ case b/2 : teardown the new fb. 
++ */ ++ ++ /* mode a first */ ++ /* search for an fb */ ++ if (i915_fbpercrtc == 1) { ++ ret = intelfb_multi_fb_probe(dev); ++ } else { ++ ret = intelfb_single_fb_probe(dev); ++ } ++ ++ register_sysrq_key('g', &sysrq_intelfb_restore_op); ++ ++ return ret; ++} ++EXPORT_SYMBOL(intelfb_probe); ++ ++int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) ++{ ++ struct fb_info *info; ++ ++ if (!fb) ++ return -EINVAL; ++ ++ info = fb->fbdev; ++ ++ if (info) { ++ unregister_framebuffer(info); ++ iounmap(info->screen_base); ++ framebuffer_release(info); ++ } ++ ++ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); ++ memset(&kernelfb_mode, 0, sizeof(struct drm_mode_set)); ++ return 0; ++} ++EXPORT_SYMBOL(intelfb_remove); ++MODULE_LICENSE("GPL and additional rights"); +diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c +new file mode 100644 +index 0000000..a5a2f53 +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_i2c.c +@@ -0,0 +1,184 @@ ++/* ++ * Copyright (c) 2006 Dave Airlie ++ * Copyright © 2006-2008 Intel Corporation ++ * Jesse Barnes ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ */ ++#include ++#include ++#include ++#include "drmP.h" ++#include "drm.h" ++#include "intel_drv.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++/* ++ * Intel GPIO access functions ++ */ ++ ++#define I2C_RISEFALL_TIME 20 ++ ++static int get_clock(void *data) ++{ ++ struct intel_i2c_chan *chan = data; ++ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; ++ u32 val; ++ ++ val = I915_READ(chan->reg); ++ return ((val & GPIO_CLOCK_VAL_IN) != 0); ++} ++ ++static int get_data(void *data) ++{ ++ struct intel_i2c_chan *chan = data; ++ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; ++ u32 val; ++ ++ val = I915_READ(chan->reg); ++ return ((val & GPIO_DATA_VAL_IN) != 0); ++} ++ ++static void set_clock(void *data, int state_high) ++{ ++ struct intel_i2c_chan *chan = data; ++ struct drm_device *dev = chan->drm_dev; ++ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; ++ u32 reserved = 0, clock_bits; ++ ++ /* On most chips, these bits must be preserved in software. 
*/ ++ if (!IS_I830(dev) && !IS_845G(dev)) ++ reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | ++ GPIO_CLOCK_PULLUP_DISABLE); ++ ++ if (state_high) ++ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; ++ else ++ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | ++ GPIO_CLOCK_VAL_MASK; ++ I915_WRITE(chan->reg, reserved | clock_bits); ++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ ++} ++ ++static void set_data(void *data, int state_high) ++{ ++ struct intel_i2c_chan *chan = data; ++ struct drm_device *dev = chan->drm_dev; ++ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; ++ u32 reserved = 0, data_bits; ++ ++ /* On most chips, these bits must be preserved in software. */ ++ if (!IS_I830(dev) && !IS_845G(dev)) ++ reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | ++ GPIO_CLOCK_PULLUP_DISABLE); ++ ++ if (state_high) ++ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; ++ else ++ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | ++ GPIO_DATA_VAL_MASK; ++ ++ I915_WRITE(chan->reg, reserved | data_bits); ++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ ++} ++ ++/** ++ * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg ++ * @dev: DRM device ++ * @output: driver specific output device ++ * @reg: GPIO reg to use ++ * @name: name for this bus ++ * ++ * Creates and registers a new i2c bus with the Linux i2c layer, for use ++ * in output probing and control (e.g. DDC or SDVO control functions). ++ * ++ * Possible values for @reg include: ++ * %GPIOA ++ * %GPIOB ++ * %GPIOC ++ * %GPIOD ++ * %GPIOE ++ * %GPIOF ++ * %GPIOG ++ * %GPIOH ++ * see PRM for details on how these different busses are used. 
++ */ ++struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, ++ const char *name) ++{ ++ struct intel_i2c_chan *chan; ++ ++ chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL); ++ if (!chan) ++ goto out_free; ++ ++ chan->drm_dev = dev; ++ chan->reg = reg; ++ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); ++ chan->adapter.owner = THIS_MODULE; ++#ifndef I2C_HW_B_INTELFB ++#define I2C_HW_B_INTELFB I2C_HW_B_I810 ++#endif ++ chan->adapter.id = I2C_HW_B_INTELFB; ++ chan->adapter.algo_data = &chan->algo; ++ chan->adapter.dev.parent = &dev->pdev->dev; ++ chan->algo.setsda = set_data; ++ chan->algo.setscl = set_clock; ++ chan->algo.getsda = get_data; ++ chan->algo.getscl = get_clock; ++ chan->algo.udelay = 20; ++ chan->algo.timeout = usecs_to_jiffies(2200); ++ chan->algo.data = chan; ++ ++ i2c_set_adapdata(&chan->adapter, chan); ++ ++ if(i2c_bit_add_bus(&chan->adapter)) ++ goto out_free; ++ ++ /* JJJ: raise SCL and SDA? */ ++ set_data(chan, 1); ++ set_clock(chan, 1); ++ udelay(20); ++ ++ return chan; ++ ++out_free: ++ kfree(chan); ++ return NULL; ++} ++ ++/** ++ * intel_i2c_destroy - unregister and free i2c bus resources ++ * @output: channel to free ++ * ++ * Unregister the adapter from the i2c layer, then free the structure. 
++ */ ++void intel_i2c_destroy(struct intel_i2c_chan *chan) ++{ ++ if (!chan) ++ return; ++ ++ i2c_del_adapter(&chan->adapter); ++ kfree(chan); ++} +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c +new file mode 100644 +index 0000000..ccecfaf +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_lvds.c +@@ -0,0 +1,525 @@ ++/* ++ * Copyright © 2006-2007 Intel Corporation ++ * Copyright (c) 2006 Dave Airlie ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ * Dave Airlie ++ * Jesse Barnes ++ */ ++ ++#include ++#include "drmP.h" ++#include "drm.h" ++#include "drm_crtc.h" ++#include "drm_edid.h" ++#include "intel_drv.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++/** ++ * Sets the backlight level. ++ * ++ * \param level backlight level, from 0 to intel_lvds_get_max_backlight(). 
++ */ ++static void intel_lvds_set_backlight(struct drm_device *dev, int level) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 blc_pwm_ctl; ++ ++ blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; ++ I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | ++ (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); ++} ++ ++/** ++ * Returns the maximum level of the backlight duty cycle field. ++ */ ++static u32 intel_lvds_get_max_backlight(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> ++ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; ++} ++ ++/** ++ * Sets the power state for the panel. ++ */ ++static void intel_lvds_set_power(struct drm_device *dev, bool on) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 pp_status; ++ ++ if (on) { ++ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | ++ POWER_TARGET_ON); ++ do { ++ pp_status = I915_READ(PP_STATUS); ++ } while ((pp_status & PP_ON) == 0); ++ ++ intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); ++ } else { ++ intel_lvds_set_backlight(dev, 0); ++ ++ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & ++ ~POWER_TARGET_ON); ++ do { ++ pp_status = I915_READ(PP_STATUS); ++ } while (pp_status & PP_ON); ++ } ++} ++ ++static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ ++ if (mode == DRM_MODE_DPMS_ON) ++ intel_lvds_set_power(dev, true); ++ else ++ intel_lvds_set_power(dev, false); ++ ++ /* XXX: We never power down the LVDS pairs. 
*/ ++} ++ ++static void intel_lvds_save(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS); ++ dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS); ++ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); ++ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); ++ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); ++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & ++ BACKLIGHT_DUTY_CYCLE_MASK); ++ ++ /* ++ * If the light is off at server startup, just make it full brightness ++ */ ++ if (dev_priv->backlight_duty_cycle == 0) ++ dev_priv->backlight_duty_cycle = ++ intel_lvds_get_max_backlight(dev); ++} ++ ++static void intel_lvds_restore(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); ++ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON); ++ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF); ++ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); ++ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); ++ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) ++ intel_lvds_set_power(dev, true); ++ else ++ intel_lvds_set_power(dev, false); ++} ++ ++static int intel_lvds_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; ++ ++ if (fixed_mode) { ++ if (mode->hdisplay > fixed_mode->hdisplay) ++ return MODE_PANEL; ++ if (mode->vdisplay > fixed_mode->vdisplay) ++ return MODE_PANEL; ++ } ++ ++ return MODE_OK; ++} ++ ++static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct 
drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); ++ struct drm_encoder *tmp_encoder; ++ ++ /* Should never happen!! */ ++ if (!IS_I965G(dev) && intel_crtc->pipe == 0) { ++ printk(KERN_ERR "Can't support LVDS on pipe A\n"); ++ return false; ++ } ++ ++ /* Should never happen!! */ ++ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { ++ if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { ++ printk(KERN_ERR "Can't enable LVDS and another " ++ "encoder on the same pipe\n"); ++ return false; ++ } ++ } ++ ++ /* ++ * If we have timings from the BIOS for the panel, put them in ++ * to the adjusted mode. The CRTC will be set up for this mode, ++ * with the panel scaling set up to source from the H/VDisplay ++ * of the original mode. ++ */ ++ if (dev_priv->panel_fixed_mode != NULL) { ++ adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; ++ adjusted_mode->hsync_start = ++ dev_priv->panel_fixed_mode->hsync_start; ++ adjusted_mode->hsync_end = ++ dev_priv->panel_fixed_mode->hsync_end; ++ adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; ++ adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; ++ adjusted_mode->vsync_start = ++ dev_priv->panel_fixed_mode->vsync_start; ++ adjusted_mode->vsync_end = ++ dev_priv->panel_fixed_mode->vsync_end; ++ adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; ++ adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; ++ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); ++ } ++ ++ /* ++ * XXX: It would be nice to support lower refresh rates on the ++ * panels to reduce power consumption, and perhaps match the ++ * user's requested refresh rate. 
++ */ ++ ++ return true; ++} ++ ++static void intel_lvds_prepare(struct drm_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); ++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & ++ BACKLIGHT_DUTY_CYCLE_MASK); ++ ++ intel_lvds_set_power(dev, false); ++} ++ ++static void intel_lvds_commit( struct drm_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->backlight_duty_cycle == 0) ++ dev_priv->backlight_duty_cycle = ++ intel_lvds_get_max_backlight(dev); ++ ++ intel_lvds_set_power(dev, true); ++} ++ ++static void intel_lvds_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); ++ u32 pfit_control; ++ ++ /* ++ * The LVDS pin pair will already have been turned on in the ++ * intel_crtc_mode_set since it has a large impact on the DPLL ++ * settings. ++ */ ++ ++ /* ++ * Enable automatic panel scaling so that non-native modes fill the ++ * screen. Should be enabled before the pipe is enabled, according to ++ * register description and PRM. ++ */ ++ if (mode->hdisplay != adjusted_mode->hdisplay || ++ mode->vdisplay != adjusted_mode->vdisplay) ++ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | ++ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | ++ HORIZ_INTERP_BILINEAR); ++ else ++ pfit_control = 0; ++ ++ if (!IS_I965G(dev)) { ++ if (dev_priv->panel_wants_dither) ++ pfit_control |= PANEL_8TO6_DITHER_ENABLE; ++ } ++ else ++ pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; ++ ++ I915_WRITE(PFIT_CONTROL, pfit_control); ++} ++ ++/** ++ * Detect the LVDS connection. ++ * ++ * This always returns CONNECTOR_STATUS_CONNECTED. 
This connector should only have ++ * been set up if the LVDS was actually connected anyway. ++ */ ++static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) ++{ ++ return connector_status_connected; ++} ++ ++/** ++ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. ++ */ ++static int intel_lvds_get_modes(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int ret = 0; ++ ++ ret = intel_ddc_get_modes(intel_output); ++ ++ if (ret) ++ return ret; ++ ++ /* Didn't get an EDID, so ++ * Set wide sync ranges so we get all modes ++ * handed to valid_mode for checking ++ */ ++ connector->display_info.min_vfreq = 0; ++ connector->display_info.max_vfreq = 200; ++ connector->display_info.min_hfreq = 0; ++ connector->display_info.max_hfreq = 200; ++ ++ if (dev_priv->panel_fixed_mode != NULL) { ++ struct drm_display_mode *mode; ++ ++ mutex_unlock(&dev->mode_config.mutex); ++ mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); ++ drm_mode_probed_add(connector, mode); ++ mutex_unlock(&dev->mode_config.mutex); ++ ++ return 1; ++ } ++ ++ return 0; ++} ++ ++/** ++ * intel_lvds_destroy - unregister and free LVDS structures ++ * @connector: connector to free ++ * ++ * Unregister the DDC bus for this connector then free the driver private ++ * structure. 
++ */ ++static void intel_lvds_destroy(struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ if (intel_output->ddc_bus) ++ intel_i2c_destroy(intel_output->ddc_bus); ++ drm_sysfs_connector_remove(connector); ++ drm_connector_cleanup(connector); ++ kfree(connector); ++} ++ ++static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { ++ .dpms = intel_lvds_dpms, ++ .mode_fixup = intel_lvds_mode_fixup, ++ .prepare = intel_lvds_prepare, ++ .mode_set = intel_lvds_mode_set, ++ .commit = intel_lvds_commit, ++}; ++ ++static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { ++ .get_modes = intel_lvds_get_modes, ++ .mode_valid = intel_lvds_mode_valid, ++ .best_encoder = intel_best_encoder, ++}; ++ ++static const struct drm_connector_funcs intel_lvds_connector_funcs = { ++ .save = intel_lvds_save, ++ .restore = intel_lvds_restore, ++ .detect = intel_lvds_detect, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .destroy = intel_lvds_destroy, ++}; ++ ++ ++static void intel_lvds_enc_destroy(struct drm_encoder *encoder) ++{ ++ drm_encoder_cleanup(encoder); ++} ++ ++static const struct drm_encoder_funcs intel_lvds_enc_funcs = { ++ .destroy = intel_lvds_enc_destroy, ++}; ++ ++ ++ ++/** ++ * intel_lvds_init - setup LVDS connectors on this device ++ * @dev: drm device ++ * ++ * Create the connector, register the LVDS DDC bus, and try to figure out what ++ * modes we can display on the LVDS panel (if present). 
++ */ ++void intel_lvds_init(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_output *intel_output; ++ struct drm_connector *connector; ++ struct drm_encoder *encoder; ++ struct drm_display_mode *scan; /* *modes, *bios_mode; */ ++ struct drm_crtc *crtc; ++ u32 lvds; ++ int pipe; ++ ++ intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); ++ if (!intel_output) { ++ return; ++ } ++ ++ connector = &intel_output->base; ++ encoder = &intel_output->enc; ++ drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, ++ DRM_MODE_CONNECTOR_LVDS); ++ ++ drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, ++ DRM_MODE_ENCODER_LVDS); ++ ++ drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); ++ intel_output->type = INTEL_OUTPUT_LVDS; ++ ++ drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); ++ drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); ++ connector->display_info.subpixel_order = SubPixelHorizontalRGB; ++ connector->interlace_allowed = false; ++ connector->doublescan_allowed = false; ++ ++ ++ /* ++ * LVDS discovery: ++ * 1) check for EDID on DDC ++ * 2) check for VBT data ++ * 3) check to see if LVDS is already on ++ * if none of the above, no panel ++ * 4) make sure lid is open ++ * if closed, act like it's not there for now ++ */ ++ ++ /* Set up the DDC bus. */ ++ intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); ++ if (!intel_output->ddc_bus) { ++ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " ++ "failed.\n"); ++ goto failed; ++ } ++ ++ /* ++ * Attempt to get the fixed panel mode from DDC. Assume that the ++ * preferred mode is the right one. 
++ */ ++ intel_ddc_get_modes(intel_output); ++ ++ list_for_each_entry(scan, &connector->probed_modes, head) { ++ mutex_lock(&dev->mode_config.mutex); ++ if (scan->type & DRM_MODE_TYPE_PREFERRED) { ++ dev_priv->panel_fixed_mode = ++ drm_mode_duplicate(dev, scan); ++ mutex_unlock(&dev->mode_config.mutex); ++ goto out; /* FIXME: check for quirks */ ++ } ++ mutex_unlock(&dev->mode_config.mutex); ++ } ++ ++ /* Failed to get EDID, what about VBT? */ ++ if (dev_priv->vbt_mode) { ++ mutex_lock(&dev->mode_config.mutex); ++ dev_priv->panel_fixed_mode = ++ drm_mode_duplicate(dev, dev_priv->vbt_mode); ++ mutex_unlock(&dev->mode_config.mutex); ++ } ++ ++ /* ++ * If we didn't get EDID, try checking if the panel is already turned ++ * on. If so, assume that whatever is currently programmed is the ++ * correct mode. ++ */ ++ lvds = I915_READ(LVDS); ++ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; ++ crtc = intel_get_crtc_from_pipe(dev, pipe); ++ ++ if (crtc && (lvds & LVDS_PORT_EN)) { ++ dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc); ++ if (dev_priv->panel_fixed_mode) { ++ dev_priv->panel_fixed_mode->type |= ++ DRM_MODE_TYPE_PREFERRED; ++ goto out; /* FIXME: check for quirks */ ++ } ++ } ++ ++ /* If we still don't have a mode after all that, give up. */ ++ if (!dev_priv->panel_fixed_mode) ++ goto failed; ++ ++ /* FIXME: detect aopen & mac mini type stuff automatically? */ ++ /* ++ * Blacklist machines with BIOSes that list an LVDS panel without ++ * actually having one. ++ */ ++ if (IS_I945GM(dev)) { ++ /* aopen mini pc */ ++ if (dev->pdev->subsystem_vendor == 0xa0a0) ++ goto failed; ++ ++ if ((dev->pdev->subsystem_vendor == 0x8086) && ++ (dev->pdev->subsystem_device == 0x7270)) { ++ /* It's a Mac Mini or Macbook Pro. ++ * ++ * Apple hardware is out to get us. The macbook pro ++ * has a real LVDS panel, but the mac mini does not, ++ * and they have the same device IDs. 
We'll ++ * distinguish by panel size, on the assumption ++ * that Apple isn't about to make any machines with an ++ * 800x600 display. ++ */ ++ ++ if (dev_priv->panel_fixed_mode != NULL && ++ dev_priv->panel_fixed_mode->hdisplay == 800 && ++ dev_priv->panel_fixed_mode->vdisplay == 600) { ++ DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n"); ++ goto failed; ++ } ++ } ++ } ++ ++ ++out: ++ drm_sysfs_connector_add(connector); ++ return; ++ ++failed: ++ DRM_DEBUG("No LVDS modes found, disabling.\n"); ++ if (intel_output->ddc_bus) ++ intel_i2c_destroy(intel_output->ddc_bus); ++ drm_connector_cleanup(connector); ++ kfree(connector); ++} +diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c +new file mode 100644 +index 0000000..e42019e +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_modes.c +@@ -0,0 +1,83 @@ ++/* ++ * Copyright (c) 2007 Dave Airlie ++ * Copyright (c) 2007 Intel Corporation ++ * Jesse Barnes ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include "drmP.h" ++#include "intel_drv.h" ++ ++/** ++ * intel_ddc_probe ++ * ++ */ ++bool intel_ddc_probe(struct intel_output *intel_output) ++{ ++ u8 out_buf[] = { 0x0, 0x0}; ++ u8 buf[2]; ++ int ret; ++ struct i2c_msg msgs[] = { ++ { ++ .addr = 0x50, ++ .flags = 0, ++ .len = 1, ++ .buf = out_buf, ++ }, ++ { ++ .addr = 0x50, ++ .flags = I2C_M_RD, ++ .len = 1, ++ .buf = buf, ++ } ++ }; ++ ++ ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); ++ if (ret == 2) ++ return true; ++ ++ return false; ++} ++ ++/** ++ * intel_ddc_get_modes - get modelist from monitor ++ * @connector: DRM connector device to use ++ * ++ * Fetch the EDID information from @connector using the DDC bus. 
++ */ ++int intel_ddc_get_modes(struct intel_output *intel_output) ++{ ++ struct edid *edid; ++ int ret = 0; ++ ++ edid = drm_get_edid(&intel_output->base, ++ &intel_output->ddc_bus->adapter); ++ if (edid) { ++ drm_mode_connector_update_edid_property(&intel_output->base, ++ edid); ++ ret = drm_add_edid_modes(&intel_output->base, edid); ++ kfree(edid); ++ } ++ ++ return ret; ++} +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c +new file mode 100644 +index 0000000..626258d +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_sdvo.c +@@ -0,0 +1,1127 @@ ++/* ++ * Copyright 2006 Dave Airlie ++ * Copyright © 2006-2007 Intel Corporation ++ * Jesse Barnes ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Eric Anholt ++ */ ++#include ++#include ++#include "drmP.h" ++#include "drm.h" ++#include "drm_crtc.h" ++#include "intel_drv.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++#include "intel_sdvo_regs.h" ++ ++#undef SDVO_DEBUG ++ ++struct intel_sdvo_priv { ++ struct intel_i2c_chan *i2c_bus; ++ int slaveaddr; ++ int output_device; ++ ++ u16 active_outputs; ++ ++ struct intel_sdvo_caps caps; ++ int pixel_clock_min, pixel_clock_max; ++ ++ int save_sdvo_mult; ++ u16 save_active_outputs; ++ struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; ++ struct intel_sdvo_dtd save_output_dtd[16]; ++ u32 save_SDVOX; ++}; ++ ++/** ++ * Writes the SDVOB or SDVOC with the given value, but always writes both ++ * SDVOB and SDVOC to work around apparent hardware issues (according to ++ * comments in the BIOS). ++ */ ++void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) ++{ ++ struct drm_device *dev = intel_output->base.dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ u32 bval = val, cval = val; ++ int i; ++ ++ if (sdvo_priv->output_device == SDVOB) { ++ cval = I915_READ(SDVOC); ++ } else { ++ bval = I915_READ(SDVOB); ++ } ++ /* ++ * Write the registers twice for luck. Sometimes, ++ * writing them only once doesn't appear to 'stick'. ++ * The BIOS does this too. 
Yay, magic ++ */ ++ for (i = 0; i < 2; i++) ++ { ++ I915_WRITE(SDVOB, bval); ++ I915_READ(SDVOB); ++ I915_WRITE(SDVOC, cval); ++ I915_READ(SDVOC); ++ } ++} ++ ++static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, ++ u8 *ch) ++{ ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ u8 out_buf[2]; ++ u8 buf[2]; ++ int ret; ++ ++ struct i2c_msg msgs[] = { ++ { ++ .addr = sdvo_priv->i2c_bus->slave_addr, ++ .flags = 0, ++ .len = 1, ++ .buf = out_buf, ++ }, ++ { ++ .addr = sdvo_priv->i2c_bus->slave_addr, ++ .flags = I2C_M_RD, ++ .len = 1, ++ .buf = buf, ++ } ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = 0; ++ ++ if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2) ++ { ++ *ch = buf[0]; ++ return true; ++ } ++ ++ DRM_DEBUG("i2c transfer returned %d\n", ret); ++ return false; ++} ++ ++static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, ++ u8 ch) ++{ ++ u8 out_buf[2]; ++ struct i2c_msg msgs[] = { ++ { ++ .addr = intel_output->i2c_bus->slave_addr, ++ .flags = 0, ++ .len = 2, ++ .buf = out_buf, ++ } ++ }; ++ ++ out_buf[0] = addr; ++ out_buf[1] = ch; ++ ++ if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1) ++ { ++ return true; ++ } ++ return false; ++} ++ ++#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} ++/** Mapping of command numbers to names, for debug output */ ++const static struct _sdvo_cmd_name { ++ u8 cmd; ++ char *name; ++} sdvo_cmd_names[] = { ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), ++ 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT), ++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), ++}; ++ ++#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? 
"SDVOB" : "SDVOC") ++#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) ++ ++#ifdef SDVO_DEBUG ++static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, ++ void *args, int args_len) ++{ ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ int i; ++ ++ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); ++ for (i = 0; i < args_len; i++) ++ printk("%02X ", ((u8 *)args)[i]); ++ for (; i < 8; i++) ++ printk(" "); ++ for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { ++ if (cmd == sdvo_cmd_names[i].cmd) { ++ printk("(%s)", sdvo_cmd_names[i].name); ++ break; ++ } ++ } ++ if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0])) ++ printk("(%02X)",cmd); ++ printk("\n"); ++} ++#else ++#define intel_sdvo_debug_write(o, c, a, l) ++#endif ++ ++static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, ++ void *args, int args_len) ++{ ++ int i; ++ ++ intel_sdvo_debug_write(intel_output, cmd, args, args_len); ++ ++ for (i = 0; i < args_len; i++) { ++ intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, ++ ((u8*)args)[i]); ++ } ++ ++ intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); ++} ++ ++#ifdef SDVO_DEBUG ++static const char *cmd_status_names[] = { ++ "Power on", ++ "Success", ++ "Not supported", ++ "Invalid arg", ++ "Pending", ++ "Target not specified", ++ "Scaling not supported" ++}; ++ ++static void intel_sdvo_debug_response(struct intel_output *intel_output, ++ void *response, int response_len, ++ u8 status) ++{ ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ ++ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv)); ++ for (i = 0; i < response_len; i++) ++ printk("%02X ", ((u8 *)response)[i]); ++ for (; i < 8; i++) ++ printk(" "); ++ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) ++ printk("(%s)", cmd_status_names[status]); ++ else ++ printk("(??? 
%d)", status); ++ printk("\n"); ++} ++#else ++#define intel_sdvo_debug_response(o, r, l, s) ++#endif ++ ++static u8 intel_sdvo_read_response(struct intel_output *intel_output, ++ void *response, int response_len) ++{ ++ int i; ++ u8 status; ++ u8 retry = 50; ++ ++ while (retry--) { ++ /* Read the command response */ ++ for (i = 0; i < response_len; i++) { ++ intel_sdvo_read_byte(intel_output, ++ SDVO_I2C_RETURN_0 + i, ++ &((u8 *)response)[i]); ++ } ++ ++ /* read the return status */ ++ intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, ++ &status); ++ ++ intel_sdvo_debug_response(intel_output, response, response_len, ++ status); ++ if (status != SDVO_CMD_STATUS_PENDING) ++ return status; ++ ++ mdelay(50); ++ } ++ ++ return status; ++} ++ ++int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) ++{ ++ if (mode->clock >= 100000) ++ return 1; ++ else if (mode->clock >= 50000) ++ return 2; ++ else ++ return 4; ++} ++ ++/** ++ * Don't check status code from this as it switches the bus back to the ++ * SDVO chips which defeats the purpose of doing a bus switch in the first ++ * place. ++ */ ++void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, u8 target) ++{ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); ++} ++ ++static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) ++{ ++ struct intel_sdvo_set_target_input_args targets = {0}; ++ u8 status; ++ ++ if (target_0 && target_1) ++ return SDVO_CMD_STATUS_NOTSUPP; ++ ++ if (target_1) ++ targets.target_1 = 1; ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, ++ sizeof(targets)); ++ ++ status = intel_sdvo_read_response(intel_output, NULL, 0); ++ ++ return (status == SDVO_CMD_STATUS_SUCCESS); ++} ++ ++/** ++ * Return whether each input is trained. ++ * ++ * This function is making an assumption about the layout of the response, ++ * which should be checked against the docs. 
++ */ ++static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) ++{ ++ struct intel_sdvo_get_trained_inputs_response response; ++ u8 status; ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); ++ status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); ++ if (status != SDVO_CMD_STATUS_SUCCESS) ++ return false; ++ ++ *input_1 = response.input0_trained; ++ *input_2 = response.input1_trained; ++ return true; ++} ++ ++static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, ++ u16 *outputs) ++{ ++ u8 status; ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); ++ status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); ++ ++ return (status == SDVO_CMD_STATUS_SUCCESS); ++} ++ ++static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, ++ u16 outputs) ++{ ++ u8 status; ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, ++ sizeof(outputs)); ++ status = intel_sdvo_read_response(intel_output, NULL, 0); ++ return (status == SDVO_CMD_STATUS_SUCCESS); ++} ++ ++static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, ++ int mode) ++{ ++ u8 status, state = SDVO_ENCODER_STATE_ON; ++ ++ switch (mode) { ++ case DRM_MODE_DPMS_ON: ++ state = SDVO_ENCODER_STATE_ON; ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ state = SDVO_ENCODER_STATE_STANDBY; ++ break; ++ case DRM_MODE_DPMS_SUSPEND: ++ state = SDVO_ENCODER_STATE_SUSPEND; ++ break; ++ case DRM_MODE_DPMS_OFF: ++ state = SDVO_ENCODER_STATE_OFF; ++ break; ++ } ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, ++ sizeof(state)); ++ status = intel_sdvo_read_response(intel_output, NULL, 0); ++ ++ return (status == SDVO_CMD_STATUS_SUCCESS); ++} ++ ++static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, ++ int *clock_min, ++ int *clock_max) ++{ 
++ struct intel_sdvo_pixel_clock_range clocks; ++ u8 status; ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, ++ NULL, 0); ++ ++ status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); ++ ++ if (status != SDVO_CMD_STATUS_SUCCESS) ++ return false; ++ ++ /* Convert the values from units of 10 kHz to kHz. */ ++ *clock_min = clocks.min * 10; ++ *clock_max = clocks.max * 10; ++ ++ return true; ++} ++ ++static bool intel_sdvo_set_target_output(struct intel_output *intel_output, ++ u16 outputs) ++{ ++ u8 status; ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, ++ sizeof(outputs)); ++ ++ status = intel_sdvo_read_response(intel_output, NULL, 0); ++ return (status == SDVO_CMD_STATUS_SUCCESS); ++} ++ ++static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, ++ struct intel_sdvo_dtd *dtd) ++{ ++ u8 status; ++ ++ intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); ++ status = intel_sdvo_read_response(intel_output, &dtd->part1, ++ sizeof(dtd->part1)); ++ if (status != SDVO_CMD_STATUS_SUCCESS) ++ return false; ++ ++ intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); ++ status = intel_sdvo_read_response(intel_output, &dtd->part2, ++ sizeof(dtd->part2)); ++ if (status != SDVO_CMD_STATUS_SUCCESS) ++ return false; ++ ++ return true; ++} ++ ++static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, ++ struct intel_sdvo_dtd *dtd) ++{ ++ return intel_sdvo_get_timing(intel_output, ++ SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); ++} ++ ++static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, ++ struct intel_sdvo_dtd *dtd) ++{ ++ return intel_sdvo_get_timing(intel_output, ++ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); ++} ++ ++static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, ++ struct intel_sdvo_dtd *dtd) ++{ ++ u8 status; ++ ++ intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); ++ status = 
intel_sdvo_read_response(intel_output, NULL, 0); ++ if (status != SDVO_CMD_STATUS_SUCCESS) ++ return false; ++ ++ intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); ++ status = intel_sdvo_read_response(intel_output, NULL, 0); ++ if (status != SDVO_CMD_STATUS_SUCCESS) ++ return false; ++ ++ return true; ++} ++ ++static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, ++ struct intel_sdvo_dtd *dtd) ++{ ++ return intel_sdvo_set_timing(intel_output, ++ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); ++} ++ ++static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, ++ struct intel_sdvo_dtd *dtd) ++{ ++ return intel_sdvo_set_timing(intel_output, ++ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); ++} ++ ++ ++static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) ++{ ++ u8 response, status; ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); ++ status = intel_sdvo_read_response(intel_output, &response, 1); ++ ++ if (status != SDVO_CMD_STATUS_SUCCESS) { ++ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n"); ++ return SDVO_CLOCK_RATE_MULT_1X; ++ } else { ++ DRM_DEBUG("Current clock rate multiplier: %d\n", response); ++ } ++ ++ return response; ++} ++ ++static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) ++{ ++ u8 status; ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); ++ status = intel_sdvo_read_response(intel_output, NULL, 0); ++ if (status != SDVO_CMD_STATUS_SUCCESS) ++ return false; ++ ++ return true; ++} ++ ++static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO ++ * device will be told of the multiplier during mode_set. 
++ */ ++ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); ++ return true; ++} ++ ++static void intel_sdvo_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_crtc *crtc = encoder->crtc; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ struct intel_output *intel_output = enc_to_intel_output(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ u16 width, height; ++ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len; ++ u16 h_sync_offset, v_sync_offset; ++ u32 sdvox; ++ struct intel_sdvo_dtd output_dtd; ++ int sdvo_pixel_multiply; ++ ++ if (!mode) ++ return; ++ ++ width = mode->crtc_hdisplay; ++ height = mode->crtc_vdisplay; ++ ++ /* do some mode translations */ ++ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; ++ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; ++ ++ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; ++ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; ++ ++ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; ++ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; ++ ++ output_dtd.part1.clock = mode->clock / 10; ++ output_dtd.part1.h_active = width & 0xff; ++ output_dtd.part1.h_blank = h_blank_len & 0xff; ++ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) | ++ ((h_blank_len >> 8) & 0xf); ++ output_dtd.part1.v_active = height & 0xff; ++ output_dtd.part1.v_blank = v_blank_len & 0xff; ++ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) | ++ ((v_blank_len >> 8) & 0xf); ++ ++ output_dtd.part2.h_sync_off = h_sync_offset; ++ output_dtd.part2.h_sync_width = h_sync_len & 0xff; ++ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | ++ (v_sync_len & 0xf); ++ output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | ++ 
((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | ++ ((v_sync_len & 0x30) >> 4); ++ ++ output_dtd.part2.dtd_flags = 0x18; ++ if (mode->flags & DRM_MODE_FLAG_PHSYNC) ++ output_dtd.part2.dtd_flags |= 0x2; ++ if (mode->flags & DRM_MODE_FLAG_PVSYNC) ++ output_dtd.part2.dtd_flags |= 0x4; ++ ++ output_dtd.part2.sdvo_flags = 0; ++ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0; ++ output_dtd.part2.reserved = 0; ++ ++ /* Set the output timing to the screen */ ++ intel_sdvo_set_target_output(intel_output, sdvo_priv->active_outputs); ++ intel_sdvo_set_output_timing(intel_output, &output_dtd); ++ ++ /* Set the input timing to the screen. Assume always input 0. */ ++ intel_sdvo_set_target_input(intel_output, true, false); ++ ++ /* We would like to use i830_sdvo_create_preferred_input_timing() to ++ * provide the device with a timing it can support, if it supports that ++ * feature. However, presumably we would need to adjust the CRTC to ++ * output the preferred timing, and we don't support that currently. ++ */ ++ intel_sdvo_set_input_timing(intel_output, &output_dtd); ++ ++ switch (intel_sdvo_get_pixel_multiplier(mode)) { ++ case 1: ++ intel_sdvo_set_clock_rate_mult(intel_output, ++ SDVO_CLOCK_RATE_MULT_1X); ++ break; ++ case 2: ++ intel_sdvo_set_clock_rate_mult(intel_output, ++ SDVO_CLOCK_RATE_MULT_2X); ++ break; ++ case 4: ++ intel_sdvo_set_clock_rate_mult(intel_output, ++ SDVO_CLOCK_RATE_MULT_4X); ++ break; ++ } ++ ++ /* Set the SDVO control regs. 
*/ ++ if (0/*IS_I965GM(dev)*/) { ++ sdvox = SDVO_BORDER_ENABLE; ++ } else { ++ sdvox = I915_READ(sdvo_priv->output_device); ++ switch (sdvo_priv->output_device) { ++ case SDVOB: ++ sdvox &= SDVOB_PRESERVE_MASK; ++ break; ++ case SDVOC: ++ sdvox &= SDVOC_PRESERVE_MASK; ++ break; ++ } ++ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; ++ } ++ if (intel_crtc->pipe == 1) ++ sdvox |= SDVO_PIPE_B_SELECT; ++ ++ sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); ++ if (IS_I965G(dev)) { ++ /* done in crtc_mode_set as the dpll_md reg must be written ++ early */ ++ } else if (IS_I945G(dev) || IS_I945GM(dev)) { ++ /* done in crtc_mode_set as it lives inside the ++ dpll register */ ++ } else { ++ sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; ++ } ++ ++ intel_sdvo_write_sdvox(intel_output, sdvox); ++} ++ ++static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_output *intel_output = enc_to_intel_output(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ u32 temp; ++ ++ if (mode != DRM_MODE_DPMS_ON) { ++ intel_sdvo_set_active_outputs(intel_output, 0); ++ if (0) ++ intel_sdvo_set_encoder_power_state(intel_output, mode); ++ ++ if (mode == DRM_MODE_DPMS_OFF) { ++ temp = I915_READ(sdvo_priv->output_device); ++ if ((temp & SDVO_ENABLE) != 0) { ++ intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); ++ } ++ } ++ } else { ++ bool input1, input2; ++ int i; ++ u8 status; ++ ++ temp = I915_READ(sdvo_priv->output_device); ++ if ((temp & SDVO_ENABLE) == 0) ++ intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); ++ for (i = 0; i < 2; i++) ++ intel_wait_for_vblank(dev); ++ ++ status = intel_sdvo_get_trained_inputs(intel_output, &input1, ++ &input2); ++ ++ ++ /* Warn if the device reported failure to sync. 
++ * A lot of SDVO devices fail to notify of sync, but it's ++ * a given it the status is a success, we succeeded. ++ */ ++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { ++ DRM_DEBUG("First %s output reported failure to sync\n", ++ SDVO_NAME(sdvo_priv)); ++ } ++ ++ if (0) ++ intel_sdvo_set_encoder_power_state(intel_output, mode); ++ intel_sdvo_set_active_outputs(intel_output, sdvo_priv->active_outputs); ++ } ++ return; ++} ++ ++static void intel_sdvo_save(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ int o; ++ ++ sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); ++ intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); ++ ++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { ++ intel_sdvo_set_target_input(intel_output, true, false); ++ intel_sdvo_get_input_timing(intel_output, ++ &sdvo_priv->save_input_dtd_1); ++ } ++ ++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { ++ intel_sdvo_set_target_input(intel_output, false, true); ++ intel_sdvo_get_input_timing(intel_output, ++ &sdvo_priv->save_input_dtd_2); ++ } ++ ++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) ++ { ++ u16 this_output = (1 << o); ++ if (sdvo_priv->caps.output_flags & this_output) ++ { ++ intel_sdvo_set_target_output(intel_output, this_output); ++ intel_sdvo_get_output_timing(intel_output, ++ &sdvo_priv->save_output_dtd[o]); ++ } ++ } ++ ++ sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); ++} ++ ++static void intel_sdvo_restore(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ int o; ++ int i; ++ 
bool input1, input2; ++ u8 status; ++ ++ intel_sdvo_set_active_outputs(intel_output, 0); ++ ++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) ++ { ++ u16 this_output = (1 << o); ++ if (sdvo_priv->caps.output_flags & this_output) { ++ intel_sdvo_set_target_output(intel_output, this_output); ++ intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); ++ } ++ } ++ ++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { ++ intel_sdvo_set_target_input(intel_output, true, false); ++ intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); ++ } ++ ++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { ++ intel_sdvo_set_target_input(intel_output, false, true); ++ intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); ++ } ++ ++ intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); ++ ++ I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX); ++ ++ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) ++ { ++ for (i = 0; i < 2; i++) ++ intel_wait_for_vblank(dev); ++ status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); ++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) ++ DRM_DEBUG("First %s output reported failure to sync\n", ++ SDVO_NAME(sdvo_priv)); ++ } ++ ++ intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); ++} ++ ++static int intel_sdvo_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ ++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ return MODE_NO_DBLESCAN; ++ ++ if (sdvo_priv->pixel_clock_min > mode->clock) ++ return MODE_CLOCK_LOW; ++ ++ if (sdvo_priv->pixel_clock_max < mode->clock) ++ return MODE_CLOCK_HIGH; ++ ++ return MODE_OK; ++} ++ ++static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) ++{ ++ u8 status; ++ ++ intel_sdvo_write_cmd(intel_output, 
SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); ++ status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); ++ if (status != SDVO_CMD_STATUS_SUCCESS) ++ return false; ++ ++ return true; ++} ++ ++struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) ++{ ++ struct drm_connector *connector = NULL; ++ struct intel_output *iout = NULL; ++ struct intel_sdvo_priv *sdvo; ++ ++ /* find the sdvo connector */ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ iout = to_intel_output(connector); ++ ++ if (iout->type != INTEL_OUTPUT_SDVO) ++ continue; ++ ++ sdvo = iout->dev_priv; ++ ++ if (sdvo->output_device == SDVOB && sdvoB) ++ return connector; ++ ++ if (sdvo->output_device == SDVOC && !sdvoB) ++ return connector; ++ ++ } ++ ++ return NULL; ++} ++ ++int intel_sdvo_supports_hotplug(struct drm_connector *connector) ++{ ++ u8 response[2]; ++ u8 status; ++ struct intel_output *intel_output; ++ DRM_DEBUG("\n"); ++ ++ if (!connector) ++ return 0; ++ ++ intel_output = to_intel_output(connector); ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); ++ status = intel_sdvo_read_response(intel_output, &response, 2); ++ ++ if (response[0] !=0) ++ return 1; ++ ++ return 0; ++} ++ ++void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) ++{ ++ u8 response[2]; ++ u8 status; ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); ++ intel_sdvo_read_response(intel_output, &response, 2); ++ ++ if (on) { ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); ++ status = intel_sdvo_read_response(intel_output, &response, 2); ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); ++ } else { ++ response[0] = 0; ++ response[1] = 0; ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); ++ } ++ ++ intel_sdvo_write_cmd(intel_output, 
SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); ++ intel_sdvo_read_response(intel_output, &response, 2); ++} ++ ++static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) ++{ ++ u8 response[2]; ++ u8 status; ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); ++ status = intel_sdvo_read_response(intel_output, &response, 2); ++ ++ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); ++ if ((response[0] != 0) || (response[1] != 0)) ++ return connector_status_connected; ++ else ++ return connector_status_disconnected; ++} ++ ++static int intel_sdvo_get_modes(struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ /* set the bus switch and get the modes */ ++ intel_sdvo_set_control_bus_switch(intel_output, SDVO_CONTROL_BUS_DDC2); ++ intel_ddc_get_modes(intel_output); ++ ++ if (list_empty(&connector->probed_modes)) ++ return 0; ++ return 1; ++} ++ ++static void intel_sdvo_destroy(struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ if (intel_output->i2c_bus) ++ intel_i2c_destroy(intel_output->i2c_bus); ++ drm_sysfs_connector_remove(connector); ++ drm_connector_cleanup(connector); ++ kfree(intel_output); ++} ++ ++static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { ++ .dpms = intel_sdvo_dpms, ++ .mode_fixup = intel_sdvo_mode_fixup, ++ .prepare = intel_encoder_prepare, ++ .mode_set = intel_sdvo_mode_set, ++ .commit = intel_encoder_commit, ++}; ++ ++static const struct drm_connector_funcs intel_sdvo_connector_funcs = { ++ .save = intel_sdvo_save, ++ .restore = intel_sdvo_restore, ++ .detect = intel_sdvo_detect, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .destroy = intel_sdvo_destroy, ++}; ++ ++static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { ++ .get_modes = 
intel_sdvo_get_modes, ++ .mode_valid = intel_sdvo_mode_valid, ++ .best_encoder = intel_best_encoder, ++}; ++ ++void intel_sdvo_enc_destroy(struct drm_encoder *encoder) ++{ ++ drm_encoder_cleanup(encoder); ++} ++ ++static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { ++ .destroy = intel_sdvo_enc_destroy, ++}; ++ ++ ++void intel_sdvo_init(struct drm_device *dev, int output_device) ++{ ++ struct drm_connector *connector; ++ struct intel_output *intel_output; ++ struct intel_sdvo_priv *sdvo_priv; ++ struct intel_i2c_chan *i2cbus = NULL; ++ int connector_type; ++ u8 ch[0x40]; ++ int i; ++ int encoder_type, output_id; ++ ++ intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); ++ if (!intel_output) { ++ return; ++ } ++ ++ connector = &intel_output->base; ++ ++ drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, ++ DRM_MODE_CONNECTOR_Unknown); ++ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); ++ sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); ++ intel_output->type = INTEL_OUTPUT_SDVO; ++ ++ connector->interlace_allowed = 0; ++ connector->doublescan_allowed = 0; ++ ++ /* setup the DDC bus. */ ++ if (output_device == SDVOB) ++ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); ++ else ++ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); ++ ++ if (!i2cbus) ++ goto err_connector; ++ ++ sdvo_priv->i2c_bus = i2cbus; ++ ++ if (output_device == SDVOB) { ++ output_id = 1; ++ sdvo_priv->i2c_bus->slave_addr = 0x38; ++ } else { ++ output_id = 2; ++ sdvo_priv->i2c_bus->slave_addr = 0x39; ++ } ++ ++ sdvo_priv->output_device = output_device; ++ intel_output->i2c_bus = i2cbus; ++ intel_output->dev_priv = sdvo_priv; ++ ++ ++ /* Read the regs to test if we can talk to the device */ ++ for (i = 0; i < 0x40; i++) { ++ if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { ++ DRM_DEBUG("No SDVO device found on SDVO%c\n", ++ output_device == SDVOB ? 
'B' : 'C'); ++ goto err_i2c; ++ } ++ } ++ ++ intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); ++ ++ memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs)); ++ ++ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */ ++ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) ++ { ++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0; ++ connector->display_info.subpixel_order = SubPixelHorizontalRGB; ++ encoder_type = DRM_MODE_ENCODER_DAC; ++ connector_type = DRM_MODE_CONNECTOR_VGA; ++ } ++ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) ++ { ++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1; ++ connector->display_info.subpixel_order = SubPixelHorizontalRGB; ++ encoder_type = DRM_MODE_ENCODER_DAC; ++ connector_type = DRM_MODE_CONNECTOR_VGA; ++ } ++ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) ++ { ++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0; ++ connector->display_info.subpixel_order = SubPixelHorizontalRGB; ++ encoder_type = DRM_MODE_ENCODER_TMDS; ++ connector_type = DRM_MODE_CONNECTOR_DVID; ++ } ++ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) ++ { ++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1; ++ connector->display_info.subpixel_order = SubPixelHorizontalRGB; ++ encoder_type = DRM_MODE_ENCODER_TMDS; ++ connector_type = DRM_MODE_CONNECTOR_DVID; ++ } ++ else ++ { ++ unsigned char bytes[2]; ++ ++ memcpy (bytes, &sdvo_priv->caps.output_flags, 2); ++ DRM_DEBUG("%s: No active RGB or TMDS outputs (0x%02x%02x)\n", ++ SDVO_NAME(sdvo_priv), ++ bytes[0], bytes[1]); ++ goto err_i2c; ++ } ++ ++ drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); ++ drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); ++ connector->connector_type = connector_type; ++ ++ drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); ++ drm_sysfs_connector_add(connector); ++ ++ /* Set the input timing to the screen. Assume always input 0. 
*/ ++ intel_sdvo_set_target_input(intel_output, true, false); ++ ++ intel_sdvo_get_input_pixel_clock_range(intel_output, ++ &sdvo_priv->pixel_clock_min, ++ &sdvo_priv->pixel_clock_max); ++ ++ ++ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, " ++ "clock range %dMHz - %dMHz, " ++ "input 1: %c, input 2: %c, " ++ "output 1: %c, output 2: %c\n", ++ SDVO_NAME(sdvo_priv), ++ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, ++ sdvo_priv->caps.device_rev_id, ++ sdvo_priv->pixel_clock_min / 1000, ++ sdvo_priv->pixel_clock_max / 1000, ++ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', ++ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', ++ /* check currently supported outputs */ ++ sdvo_priv->caps.output_flags & ++ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', ++ sdvo_priv->caps.output_flags & ++ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); ++ ++ intel_output->ddc_bus = i2cbus; ++ ++ return; ++ ++err_i2c: ++ intel_i2c_destroy(intel_output->i2c_bus); ++err_connector: ++ drm_connector_cleanup(connector); ++ kfree(intel_output); ++ ++ return; ++} +diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h +new file mode 100644 +index 0000000..861a43f +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h +@@ -0,0 +1,327 @@ ++/* ++ * Copyright © 2006-2007 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ */ ++ ++/** ++ * @file SDVO command definitions and structures. ++ */ ++ ++#define SDVO_OUTPUT_FIRST (0) ++#define SDVO_OUTPUT_TMDS0 (1 << 0) ++#define SDVO_OUTPUT_RGB0 (1 << 1) ++#define SDVO_OUTPUT_CVBS0 (1 << 2) ++#define SDVO_OUTPUT_SVID0 (1 << 3) ++#define SDVO_OUTPUT_YPRPB0 (1 << 4) ++#define SDVO_OUTPUT_SCART0 (1 << 5) ++#define SDVO_OUTPUT_LVDS0 (1 << 6) ++#define SDVO_OUTPUT_TMDS1 (1 << 8) ++#define SDVO_OUTPUT_RGB1 (1 << 9) ++#define SDVO_OUTPUT_CVBS1 (1 << 10) ++#define SDVO_OUTPUT_SVID1 (1 << 11) ++#define SDVO_OUTPUT_YPRPB1 (1 << 12) ++#define SDVO_OUTPUT_SCART1 (1 << 13) ++#define SDVO_OUTPUT_LVDS1 (1 << 14) ++#define SDVO_OUTPUT_LAST (14) ++ ++struct intel_sdvo_caps { ++ u8 vendor_id; ++ u8 device_id; ++ u8 device_rev_id; ++ u8 sdvo_version_major; ++ u8 sdvo_version_minor; ++ unsigned int sdvo_inputs_mask:2; ++ unsigned int smooth_scaling:1; ++ unsigned int sharp_scaling:1; ++ unsigned int up_scaling:1; ++ unsigned int down_scaling:1; ++ unsigned int stall_support:1; ++ unsigned int pad:1; ++ u16 output_flags; ++} __attribute__((packed)); ++ ++/** This matches the EDID DTD structure, more or less */ ++struct intel_sdvo_dtd { ++ struct { ++ u16 clock; /**< pixel clock, in 10kHz units */ ++ u8 h_active; /**< lower 8 bits (pixels) */ ++ u8 h_blank; /**< lower 8 bits (pixels) */ ++ u8 h_high; /**< upper 4 bits each h_active, h_blank */ ++ u8 v_active; /**< lower 8 bits (lines) */ ++ u8 v_blank; /**< lower 8 bits (lines) */ 
++ u8 v_high; /**< upper 4 bits each v_active, v_blank */ ++ } part1; ++ ++ struct { ++ u8 h_sync_off; /**< lower 8 bits, from hblank start */ ++ u8 h_sync_width; /**< lower 8 bits (pixels) */ ++ /** lower 4 bits each vsync offset, vsync width */ ++ u8 v_sync_off_width; ++ /** ++ * 2 high bits of hsync offset, 2 high bits of hsync width, ++ * bits 4-5 of vsync offset, and 2 high bits of vsync width. ++ */ ++ u8 sync_off_width_high; ++ u8 dtd_flags; ++ u8 sdvo_flags; ++ /** bits 6-7 of vsync offset at bits 6-7 */ ++ u8 v_sync_off_high; ++ u8 reserved; ++ } part2; ++} __attribute__((packed)); ++ ++struct intel_sdvo_pixel_clock_range { ++ u16 min; /**< pixel clock, in 10kHz units */ ++ u16 max; /**< pixel clock, in 10kHz units */ ++} __attribute__((packed)); ++ ++struct intel_sdvo_preferred_input_timing_args { ++ u16 clock; ++ u16 width; ++ u16 height; ++} __attribute__((packed)); ++ ++/* I2C registers for SDVO */ ++#define SDVO_I2C_ARG_0 0x07 ++#define SDVO_I2C_ARG_1 0x06 ++#define SDVO_I2C_ARG_2 0x05 ++#define SDVO_I2C_ARG_3 0x04 ++#define SDVO_I2C_ARG_4 0x03 ++#define SDVO_I2C_ARG_5 0x02 ++#define SDVO_I2C_ARG_6 0x01 ++#define SDVO_I2C_ARG_7 0x00 ++#define SDVO_I2C_OPCODE 0x08 ++#define SDVO_I2C_CMD_STATUS 0x09 ++#define SDVO_I2C_RETURN_0 0x0a ++#define SDVO_I2C_RETURN_1 0x0b ++#define SDVO_I2C_RETURN_2 0x0c ++#define SDVO_I2C_RETURN_3 0x0d ++#define SDVO_I2C_RETURN_4 0x0e ++#define SDVO_I2C_RETURN_5 0x0f ++#define SDVO_I2C_RETURN_6 0x10 ++#define SDVO_I2C_RETURN_7 0x11 ++#define SDVO_I2C_VENDOR_BEGIN 0x20 ++ ++/* Status results */ ++#define SDVO_CMD_STATUS_POWER_ON 0x0 ++#define SDVO_CMD_STATUS_SUCCESS 0x1 ++#define SDVO_CMD_STATUS_NOTSUPP 0x2 ++#define SDVO_CMD_STATUS_INVALID_ARG 0x3 ++#define SDVO_CMD_STATUS_PENDING 0x4 ++#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5 ++#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6 ++ ++/* SDVO commands, argument/result registers */ ++ ++#define SDVO_CMD_RESET 0x01 ++ ++/** Returns a struct intel_sdvo_caps */ ++#define 
SDVO_CMD_GET_DEVICE_CAPS 0x02 ++ ++#define SDVO_CMD_GET_FIRMWARE_REV 0x86 ++# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0 ++# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1 ++# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2 ++ ++/** ++ * Reports which inputs are trained (managed to sync). ++ * ++ * Devices must have trained within 2 vsyncs of a mode change. ++ */ ++#define SDVO_CMD_GET_TRAINED_INPUTS 0x03 ++struct intel_sdvo_get_trained_inputs_response { ++ unsigned int input0_trained:1; ++ unsigned int input1_trained:1; ++ unsigned int pad:6; ++} __attribute__((packed)); ++ ++/** Returns a struct intel_sdvo_output_flags of active outputs. */ ++#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 ++ ++/** ++ * Sets the current set of active outputs. ++ * ++ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP ++ * on multi-output devices. ++ */ ++#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05 ++ ++/** ++ * Returns the current mapping of SDVO inputs to outputs on the device. ++ * ++ * Returns two struct intel_sdvo_output_flags structures. ++ */ ++#define SDVO_CMD_GET_IN_OUT_MAP 0x06 ++ ++/** ++ * Sets the current mapping of SDVO inputs to outputs on the device. ++ * ++ * Takes two struct i380_sdvo_output_flags structures. ++ */ ++#define SDVO_CMD_SET_IN_OUT_MAP 0x07 ++ ++/** ++ * Returns a struct intel_sdvo_output_flags of attached displays. ++ */ ++#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b ++ ++/** ++ * Returns a struct intel_sdvo_ouptut_flags of displays supporting hot plugging. ++ */ ++#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c ++ ++/** ++ * Takes a struct intel_sdvo_output_flags. ++ */ ++#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d ++ ++/** ++ * Returns a struct intel_sdvo_output_flags of displays with hot plug ++ * interrupts enabled. 
++ */ ++#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e ++ ++#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f ++struct intel_sdvo_get_interrupt_event_source_response { ++ u16 interrupt_status; ++ unsigned int ambient_light_interrupt:1; ++ unsigned int pad:7; ++} __attribute__((packed)); ++ ++/** ++ * Selects which input is affected by future input commands. ++ * ++ * Commands affected include SET_INPUT_TIMINGS_PART[12], ++ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12], ++ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS. ++ */ ++#define SDVO_CMD_SET_TARGET_INPUT 0x10 ++struct intel_sdvo_set_target_input_args { ++ unsigned int target_1:1; ++ unsigned int pad:7; ++} __attribute__((packed)); ++ ++/** ++ * Takes a struct intel_sdvo_output_flags of which outputs are targetted by ++ * future output commands. ++ * ++ * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12], ++ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE. ++ */ ++#define SDVO_CMD_SET_TARGET_OUTPUT 0x11 ++ ++#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12 ++#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13 ++#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14 ++#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15 ++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16 ++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17 ++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18 ++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19 ++/* Part 1 */ ++# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0 ++# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1 ++# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2 ++# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3 ++# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4 ++# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5 ++# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6 ++# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7 ++/* Part 2 */ ++# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0 ++# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1 ++# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2 ++# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3 ++# 
define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4 ++# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7) ++# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5) ++# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3) ++# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1) ++# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5 ++# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7) ++# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6) ++# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6) ++# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4) ++# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4) ++# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4) ++# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4) ++# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6 ++ ++/** ++ * Generates a DTD based on the given width, height, and flags. ++ * ++ * This will be supported by any device supporting scaling or interlaced ++ * modes. ++ */ ++#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a ++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0 ++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1 ++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2 ++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3 ++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4 ++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5 ++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6 ++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0) ++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1) ++ ++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b ++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c ++ ++/** Returns a struct intel_sdvo_pixel_clock_range */ ++#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d ++/** Returns a struct intel_sdvo_pixel_clock_range */ ++#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e ++ ++/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ ++#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f ++ ++/** Returns a byte containing a 
SDVO_CLOCK_RATE_MULT_* flag */ ++#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20 ++/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ ++#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21 ++# define SDVO_CLOCK_RATE_MULT_1X (1 << 0) ++# define SDVO_CLOCK_RATE_MULT_2X (1 << 1) ++# define SDVO_CLOCK_RATE_MULT_4X (1 << 3) ++ ++#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 ++ ++#define SDVO_CMD_GET_TV_FORMAT 0x28 ++ ++#define SDVO_CMD_SET_TV_FORMAT 0x29 ++ ++#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a ++#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b ++#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c ++# define SDVO_ENCODER_STATE_ON (1 << 0) ++# define SDVO_ENCODER_STATE_STANDBY (1 << 1) ++# define SDVO_ENCODER_STATE_SUSPEND (1 << 2) ++# define SDVO_ENCODER_STATE_OFF (1 << 3) ++ ++#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93 ++ ++#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a ++# define SDVO_CONTROL_BUS_PROM 0x0 ++# define SDVO_CONTROL_BUS_DDC1 0x1 ++# define SDVO_CONTROL_BUS_DDC2 0x2 ++# define SDVO_CONTROL_BUS_DDC3 0x3 +diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c +new file mode 100644 +index 0000000..d409b86 +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_tv.c +@@ -0,0 +1,1725 @@ ++/* ++ * Copyright © 2006-2008 Intel Corporation ++ * Jesse Barnes ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ * ++ */ ++ ++/** @file ++ * Integrated TV-out support for the 915GM and 945GM. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_crtc.h" ++#include "drm_edid.h" ++#include "intel_drv.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++enum tv_margin { ++ TV_MARGIN_LEFT, TV_MARGIN_TOP, ++ TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM ++}; ++ ++/** Private structure for the integrated TV support */ ++struct intel_tv_priv { ++ int type; ++ char *tv_format; ++ int margin[4]; ++ u32 save_TV_H_CTL_1; ++ u32 save_TV_H_CTL_2; ++ u32 save_TV_H_CTL_3; ++ u32 save_TV_V_CTL_1; ++ u32 save_TV_V_CTL_2; ++ u32 save_TV_V_CTL_3; ++ u32 save_TV_V_CTL_4; ++ u32 save_TV_V_CTL_5; ++ u32 save_TV_V_CTL_6; ++ u32 save_TV_V_CTL_7; ++ u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3; ++ ++ u32 save_TV_CSC_Y; ++ u32 save_TV_CSC_Y2; ++ u32 save_TV_CSC_U; ++ u32 save_TV_CSC_U2; ++ u32 save_TV_CSC_V; ++ u32 save_TV_CSC_V2; ++ u32 save_TV_CLR_KNOBS; ++ u32 save_TV_CLR_LEVEL; ++ u32 save_TV_WIN_POS; ++ u32 save_TV_WIN_SIZE; ++ u32 save_TV_FILTER_CTL_1; ++ u32 save_TV_FILTER_CTL_2; ++ u32 save_TV_FILTER_CTL_3; ++ ++ u32 save_TV_H_LUMA[60]; ++ u32 save_TV_H_CHROMA[60]; ++ u32 save_TV_V_LUMA[43]; ++ u32 save_TV_V_CHROMA[43]; ++ ++ u32 save_TV_DAC; ++ u32 save_TV_CTL; ++}; ++ ++struct video_levels { ++ int blank, black, burst; ++}; ++ ++struct color_conversion { ++ u16 ry, gy, by, ay; ++ u16 ru, gu, bu, au; ++ u16 rv, gv, bv, av; ++}; ++ ++static const u32 
filter_table[] = { ++ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, ++ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, ++ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, ++ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, ++ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, ++ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, ++ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, ++ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, ++ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, ++ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, ++ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, ++ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, ++ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, ++ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, ++ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, ++ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, ++ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, ++ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, ++ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, ++ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, ++ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, ++ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, ++ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, ++ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, ++ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, ++ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, ++ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, ++ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, ++ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, ++ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, ++ 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0, ++ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, ++ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, ++ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, ++ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, ++ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, ++ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, ++ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00, ++ 0x308031C0, 0x2F203DC0, 0x31802900, 
0x3E8030C0, ++ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, ++ 0x28003100, 0x28002F00, 0x00003100, 0x36403000, ++ 0x2D002CC0, 0x30003640, 0x2D0036C0, ++ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, ++ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, ++ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, ++ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, ++ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, ++ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, ++ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00, ++ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0, ++ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, ++ 0x28003100, 0x28002F00, 0x00003100, ++}; ++ ++/* ++ * Color conversion values have 3 separate fixed point formats: ++ * ++ * 10 bit fields (ay, au) ++ * 1.9 fixed point (b.bbbbbbbbb) ++ * 11 bit fields (ry, by, ru, gu, gv) ++ * exp.mantissa (ee.mmmmmmmmm) ++ * ee = 00 = 10^-1 (0.mmmmmmmmm) ++ * ee = 01 = 10^-2 (0.0mmmmmmmmm) ++ * ee = 10 = 10^-3 (0.00mmmmmmmmm) ++ * ee = 11 = 10^-4 (0.000mmmmmmmmm) ++ * 12 bit fields (gy, rv, bu) ++ * exp.mantissa (eee.mmmmmmmmm) ++ * eee = 000 = 10^-1 (0.mmmmmmmmm) ++ * eee = 001 = 10^-2 (0.0mmmmmmmmm) ++ * eee = 010 = 10^-3 (0.00mmmmmmmmm) ++ * eee = 011 = 10^-4 (0.000mmmmmmmmm) ++ * eee = 100 = reserved ++ * eee = 101 = reserved ++ * eee = 110 = reserved ++ * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation) ++ * ++ * Saturation and contrast are 8 bits, with their own representation: ++ * 8 bit field (saturation, contrast) ++ * exp.mantissa (ee.mmmmmm) ++ * ee = 00 = 10^-1 (0.mmmmmm) ++ * ee = 01 = 10^0 (m.mmmmm) ++ * ee = 10 = 10^1 (mm.mmmm) ++ * ee = 11 = 10^2 (mmm.mmm) ++ * ++ * Simple conversion function: ++ * ++ * static u32 ++ * float_to_csc_11(float f) ++ * { ++ * u32 exp; ++ * u32 mant; ++ * u32 ret; ++ * ++ * if (f < 0) ++ * f = -f; ++ * ++ * if (f >= 1) { ++ * exp = 0x7; ++ * mant = 1 << 8; ++ * } else { ++ * for (exp = 0; exp < 3 && f < 0.5; exp++) ++ * f *= 2.0; ++ * mant = (f * (1 << 9) + 0.5); 
++ * if (mant >= (1 << 9)) ++ * mant = (1 << 9) - 1; ++ * } ++ * ret = (exp << 9) | mant; ++ * return ret; ++ * } ++ */ ++ ++/* ++ * Behold, magic numbers! If we plant them they might grow a big ++ * s-video cable to the sky... or something. ++ * ++ * Pre-converted to appropriate hex value. ++ */ ++ ++/* ++ * PAL & NTSC values for composite & s-video connections ++ */ ++static const struct color_conversion ntsc_m_csc_composite = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, ++ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, ++ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, ++}; ++ ++static const struct video_levels ntsc_m_levels_composite = { ++ .blank = 225, .black = 267, .burst = 113, ++}; ++ ++static const struct color_conversion ntsc_m_csc_svideo = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, ++ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, ++ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, ++}; ++ ++static const struct video_levels ntsc_m_levels_svideo = { ++ .blank = 266, .black = 316, .burst = 133, ++}; ++ ++static const struct color_conversion ntsc_j_csc_composite = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, ++ .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, ++ .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, ++}; ++ ++static const struct video_levels ntsc_j_levels_composite = { ++ .blank = 225, .black = 225, .burst = 113, ++}; ++ ++static const struct color_conversion ntsc_j_csc_svideo = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, ++ .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, ++ .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, ++}; ++ ++static const struct video_levels ntsc_j_levels_svideo = { ++ .blank = 266, .black = 266, .burst = 133, ++}; ++ ++static const struct color_conversion pal_csc_composite = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, ++ .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, 
++ .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, ++}; ++ ++static const struct video_levels pal_levels_composite = { ++ .blank = 237, .black = 237, .burst = 118, ++}; ++ ++static const struct color_conversion pal_csc_svideo = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, ++ .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, ++ .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, ++}; ++ ++static const struct video_levels pal_levels_svideo = { ++ .blank = 280, .black = 280, .burst = 139, ++}; ++ ++static const struct color_conversion pal_m_csc_composite = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, ++ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, ++ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, ++}; ++ ++static const struct video_levels pal_m_levels_composite = { ++ .blank = 225, .black = 267, .burst = 113, ++}; ++ ++static const struct color_conversion pal_m_csc_svideo = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, ++ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, ++ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, ++}; ++ ++static const struct video_levels pal_m_levels_svideo = { ++ .blank = 266, .black = 316, .burst = 133, ++}; ++ ++static const struct color_conversion pal_n_csc_composite = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, ++ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, ++ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, ++}; ++ ++static const struct video_levels pal_n_levels_composite = { ++ .blank = 225, .black = 267, .burst = 118, ++}; ++ ++static const struct color_conversion pal_n_csc_svideo = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, ++ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, ++ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, ++}; ++ ++static const struct video_levels pal_n_levels_svideo = { ++ .blank = 266, .black = 316, .burst = 139, ++}; ++ ++/* ++ * Component 
connections ++ */ ++static const struct color_conversion sdtv_csc_yprpb = { ++ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, ++ .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, ++ .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, ++}; ++ ++static const struct color_conversion sdtv_csc_rgb = { ++ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, ++ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, ++ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, ++}; ++ ++static const struct color_conversion hdtv_csc_yprpb = { ++ .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, ++ .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, ++ .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, ++}; ++ ++static const struct color_conversion hdtv_csc_rgb = { ++ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, ++ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, ++ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, ++}; ++ ++static const struct video_levels component_levels = { ++ .blank = 279, .black = 279, .burst = 0, ++}; ++ ++ ++struct tv_mode { ++ char *name; ++ int clock; ++ int refresh; /* in millihertz (for precision) */ ++ u32 oversample; ++ int hsync_end, hblank_start, hblank_end, htotal; ++ bool progressive, trilevel_sync, component_only; ++ int vsync_start_f1, vsync_start_f2, vsync_len; ++ bool veq_ena; ++ int veq_start_f1, veq_start_f2, veq_len; ++ int vi_end_f1, vi_end_f2, nbr_end; ++ bool burst_ena; ++ int hburst_start, hburst_len; ++ int vburst_start_f1, vburst_end_f1; ++ int vburst_start_f2, vburst_end_f2; ++ int vburst_start_f3, vburst_end_f3; ++ int vburst_start_f4, vburst_end_f4; ++ /* ++ * subcarrier programming ++ */ ++ int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc; ++ u32 sc_reset; ++ bool pal_burst; ++ /* ++ * blank/black levels ++ */ ++ const struct video_levels *composite_levels, *svideo_levels; ++ const struct color_conversion *composite_color, *svideo_color; ++ const u32 
*filter_table; ++ int max_srcw; ++}; ++ ++ ++/* ++ * Sub carrier DDA ++ * ++ * I think this works as follows: ++ * ++ * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096 ++ * ++ * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value ++ * ++ * So, ++ * dda1_ideal = subcarrier/pixel * 4096 ++ * dda1_inc = floor (dda1_ideal) ++ * dda2 = dda1_ideal - dda1_inc ++ * ++ * then pick a ratio for dda2 that gives the closest approximation. If ++ * you can't get close enough, you can play with dda3 as well. This ++ * seems likely to happen when dda2 is small as the jumps would be larger ++ * ++ * To invert this, ++ * ++ * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size) ++ * ++ * The constants below were all computed using a 107.520MHz clock ++ */ ++ ++/** ++ * Register programming values for TV modes. ++ * ++ * These values account for -1s required. ++ */ ++ ++const static struct tv_mode tv_modes[] = { ++ { ++ .name = "NTSC-M", ++ .clock = 107520, ++ .refresh = 29970, ++ .oversample = TV_OVERSAMPLE_8X, ++ .component_only = 0, ++ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ ++ ++ .hsync_end = 64, .hblank_end = 124, ++ .hblank_start = 836, .htotal = 857, ++ ++ .progressive = false, .trilevel_sync = false, ++ ++ .vsync_start_f1 = 6, .vsync_start_f2 = 7, ++ .vsync_len = 6, ++ ++ .veq_ena = true, .veq_start_f1 = 0, ++ .veq_start_f2 = 1, .veq_len = 18, ++ ++ .vi_end_f1 = 20, .vi_end_f2 = 21, ++ .nbr_end = 240, ++ ++ .burst_ena = true, ++ .hburst_start = 72, .hburst_len = 34, ++ .vburst_start_f1 = 9, .vburst_end_f1 = 240, ++ .vburst_start_f2 = 10, .vburst_end_f2 = 240, ++ .vburst_start_f3 = 9, .vburst_end_f3 = 240, ++ .vburst_start_f4 = 10, .vburst_end_f4 = 240, ++ ++ /* desired 3.5800000 actual 3.5800000 clock 107.52 */ ++ .dda1_inc = 136, ++ .dda2_inc = 7624, .dda2_size = 20013, ++ .dda3_inc = 0, .dda3_size = 0, ++ .sc_reset = TV_SC_RESET_EVERY_4, ++ .pal_burst = false, ++ ++ .composite_levels = 
&ntsc_m_levels_composite, ++ .composite_color = &ntsc_m_csc_composite, ++ .svideo_levels = &ntsc_m_levels_svideo, ++ .svideo_color = &ntsc_m_csc_svideo, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "NTSC-443", ++ .clock = 107520, ++ .refresh = 29970, ++ .oversample = TV_OVERSAMPLE_8X, ++ .component_only = 0, ++ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */ ++ .hsync_end = 64, .hblank_end = 124, ++ .hblank_start = 836, .htotal = 857, ++ ++ .progressive = false, .trilevel_sync = false, ++ ++ .vsync_start_f1 = 6, .vsync_start_f2 = 7, ++ .vsync_len = 6, ++ ++ .veq_ena = true, .veq_start_f1 = 0, ++ .veq_start_f2 = 1, .veq_len = 18, ++ ++ .vi_end_f1 = 20, .vi_end_f2 = 21, ++ .nbr_end = 240, ++ ++ .burst_ena = 8, ++ .hburst_start = 72, .hburst_len = 34, ++ .vburst_start_f1 = 9, .vburst_end_f1 = 240, ++ .vburst_start_f2 = 10, .vburst_end_f2 = 240, ++ .vburst_start_f3 = 9, .vburst_end_f3 = 240, ++ .vburst_start_f4 = 10, .vburst_end_f4 = 240, ++ ++ /* desired 4.4336180 actual 4.4336180 clock 107.52 */ ++ .dda1_inc = 168, ++ .dda2_inc = 18557, .dda2_size = 20625, ++ .dda3_inc = 0, .dda3_size = 0, ++ .sc_reset = TV_SC_RESET_EVERY_8, ++ .pal_burst = true, ++ ++ .composite_levels = &ntsc_m_levels_composite, ++ .composite_color = &ntsc_m_csc_composite, ++ .svideo_levels = &ntsc_m_levels_svideo, ++ .svideo_color = &ntsc_m_csc_svideo, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "NTSC-J", ++ .clock = 107520, ++ .refresh = 29970, ++ .oversample = TV_OVERSAMPLE_8X, ++ .component_only = 0, ++ ++ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ ++ .hsync_end = 64, .hblank_end = 124, ++ .hblank_start = 836, .htotal = 857, ++ ++ .progressive = false, .trilevel_sync = false, ++ ++ .vsync_start_f1 = 6, .vsync_start_f2 = 7, ++ .vsync_len = 6, ++ ++ .veq_ena = true, .veq_start_f1 = 0, ++ .veq_start_f2 = 1, .veq_len = 18, ++ ++ .vi_end_f1 = 20, .vi_end_f2 = 21, ++ .nbr_end = 240, ++ ++ .burst_ena = true, ++ .hburst_start = 72, 
.hburst_len = 34, ++ .vburst_start_f1 = 9, .vburst_end_f1 = 240, ++ .vburst_start_f2 = 10, .vburst_end_f2 = 240, ++ .vburst_start_f3 = 9, .vburst_end_f3 = 240, ++ .vburst_start_f4 = 10, .vburst_end_f4 = 240, ++ ++ /* desired 3.5800000 actual 3.5800000 clock 107.52 */ ++ .dda1_inc = 136, ++ .dda2_inc = 7624, .dda2_size = 20013, ++ .dda3_inc = 0, .dda3_size = 0, ++ .sc_reset = TV_SC_RESET_EVERY_4, ++ .pal_burst = false, ++ ++ .composite_levels = &ntsc_j_levels_composite, ++ .composite_color = &ntsc_j_csc_composite, ++ .svideo_levels = &ntsc_j_levels_svideo, ++ .svideo_color = &ntsc_j_csc_svideo, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "PAL-M", ++ .clock = 107520, ++ .refresh = 29970, ++ .oversample = TV_OVERSAMPLE_8X, ++ .component_only = 0, ++ ++ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ ++ .hsync_end = 64, .hblank_end = 124, ++ .hblank_start = 836, .htotal = 857, ++ ++ .progressive = false, .trilevel_sync = false, ++ ++ .vsync_start_f1 = 6, .vsync_start_f2 = 7, ++ .vsync_len = 6, ++ ++ .veq_ena = true, .veq_start_f1 = 0, ++ .veq_start_f2 = 1, .veq_len = 18, ++ ++ .vi_end_f1 = 20, .vi_end_f2 = 21, ++ .nbr_end = 240, ++ ++ .burst_ena = true, ++ .hburst_start = 72, .hburst_len = 34, ++ .vburst_start_f1 = 9, .vburst_end_f1 = 240, ++ .vburst_start_f2 = 10, .vburst_end_f2 = 240, ++ .vburst_start_f3 = 9, .vburst_end_f3 = 240, ++ .vburst_start_f4 = 10, .vburst_end_f4 = 240, ++ ++ /* desired 3.5800000 actual 3.5800000 clock 107.52 */ ++ .dda1_inc = 136, ++ .dda2_inc = 7624, .dda2_size = 20013, ++ .dda3_inc = 0, .dda3_size = 0, ++ .sc_reset = TV_SC_RESET_EVERY_4, ++ .pal_burst = false, ++ ++ .composite_levels = &pal_m_levels_composite, ++ .composite_color = &pal_m_csc_composite, ++ .svideo_levels = &pal_m_levels_svideo, ++ .svideo_color = &pal_m_csc_svideo, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ ++ .name = "PAL-N", ++ .clock = 107520, ++ .refresh = 25000, ++ 
.oversample = TV_OVERSAMPLE_8X, ++ .component_only = 0, ++ ++ .hsync_end = 64, .hblank_end = 128, ++ .hblank_start = 844, .htotal = 863, ++ ++ .progressive = false, .trilevel_sync = false, ++ ++ ++ .vsync_start_f1 = 6, .vsync_start_f2 = 7, ++ .vsync_len = 6, ++ ++ .veq_ena = true, .veq_start_f1 = 0, ++ .veq_start_f2 = 1, .veq_len = 18, ++ ++ .vi_end_f1 = 24, .vi_end_f2 = 25, ++ .nbr_end = 286, ++ ++ .burst_ena = true, ++ .hburst_start = 73, .hburst_len = 34, ++ .vburst_start_f1 = 8, .vburst_end_f1 = 285, ++ .vburst_start_f2 = 8, .vburst_end_f2 = 286, ++ .vburst_start_f3 = 9, .vburst_end_f3 = 286, ++ .vburst_start_f4 = 9, .vburst_end_f4 = 285, ++ ++ ++ /* desired 4.4336180 actual 4.4336180 clock 107.52 */ ++ .dda1_inc = 168, ++ .dda2_inc = 18557, .dda2_size = 20625, ++ .dda3_inc = 0, .dda3_size = 0, ++ .sc_reset = TV_SC_RESET_EVERY_8, ++ .pal_burst = true, ++ ++ .composite_levels = &pal_n_levels_composite, ++ .composite_color = &pal_n_csc_composite, ++ .svideo_levels = &pal_n_levels_svideo, ++ .svideo_color = &pal_n_csc_svideo, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ ++ .name = "PAL", ++ .clock = 107520, ++ .refresh = 25000, ++ .oversample = TV_OVERSAMPLE_8X, ++ .component_only = 0, ++ ++ .hsync_end = 64, .hblank_end = 128, ++ .hblank_start = 844, .htotal = 863, ++ ++ .progressive = false, .trilevel_sync = false, ++ ++ .vsync_start_f1 = 5, .vsync_start_f2 = 6, ++ .vsync_len = 5, ++ ++ .veq_ena = true, .veq_start_f1 = 0, ++ .veq_start_f2 = 1, .veq_len = 15, ++ ++ .vi_end_f1 = 24, .vi_end_f2 = 25, ++ .nbr_end = 286, ++ ++ .burst_ena = true, ++ .hburst_start = 73, .hburst_len = 32, ++ .vburst_start_f1 = 8, .vburst_end_f1 = 285, ++ .vburst_start_f2 = 8, .vburst_end_f2 = 286, ++ .vburst_start_f3 = 9, .vburst_end_f3 = 286, ++ .vburst_start_f4 = 9, .vburst_end_f4 = 285, ++ ++ /* desired 4.4336180 actual 4.4336180 clock 107.52 */ ++ .dda1_inc = 168, ++ .dda2_inc = 18557, .dda2_size = 20625, ++ 
.dda3_inc = 0, .dda3_size = 0, ++ .sc_reset = TV_SC_RESET_EVERY_8, ++ .pal_burst = true, ++ ++ .composite_levels = &pal_levels_composite, ++ .composite_color = &pal_csc_composite, ++ .svideo_levels = &pal_levels_svideo, ++ .svideo_color = &pal_csc_svideo, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "480p@59.94Hz", ++ .clock = 107520, ++ .refresh = 59940, ++ .oversample = TV_OVERSAMPLE_4X, ++ .component_only = 1, ++ ++ .hsync_end = 64, .hblank_end = 122, ++ .hblank_start = 842, .htotal = 857, ++ ++ .progressive = true,.trilevel_sync = false, ++ ++ .vsync_start_f1 = 12, .vsync_start_f2 = 12, ++ .vsync_len = 12, ++ ++ .veq_ena = false, ++ ++ .vi_end_f1 = 44, .vi_end_f2 = 44, ++ .nbr_end = 496, ++ ++ .burst_ena = false, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "480p@60Hz", ++ .clock = 107520, ++ .refresh = 60000, ++ .oversample = TV_OVERSAMPLE_4X, ++ .component_only = 1, ++ ++ .hsync_end = 64, .hblank_end = 122, ++ .hblank_start = 842, .htotal = 856, ++ ++ .progressive = true,.trilevel_sync = false, ++ ++ .vsync_start_f1 = 12, .vsync_start_f2 = 12, ++ .vsync_len = 12, ++ ++ .veq_ena = false, ++ ++ .vi_end_f1 = 44, .vi_end_f2 = 44, ++ .nbr_end = 496, ++ ++ .burst_ena = false, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "576p", ++ .clock = 107520, ++ .refresh = 50000, ++ .oversample = TV_OVERSAMPLE_4X, ++ .component_only = 1, ++ ++ .hsync_end = 64, .hblank_end = 139, ++ .hblank_start = 859, .htotal = 863, ++ ++ .progressive = true, .trilevel_sync = false, ++ ++ .vsync_start_f1 = 10, .vsync_start_f2 = 10, ++ .vsync_len = 10, ++ ++ .veq_ena = false, ++ ++ .vi_end_f1 = 48, .vi_end_f2 = 48, ++ .nbr_end = 575, ++ ++ .burst_ena = false, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "720p@60Hz", ++ .clock = 148800, ++ .refresh = 60000, ++ .oversample = TV_OVERSAMPLE_2X, ++ .component_only = 1, ++ ++ .hsync_end = 80, .hblank_end = 300, ++ .hblank_start = 1580, .htotal = 1649, ++ ++ .progressive = true, .trilevel_sync = 
true, ++ ++ .vsync_start_f1 = 10, .vsync_start_f2 = 10, ++ .vsync_len = 10, ++ ++ .veq_ena = false, ++ ++ .vi_end_f1 = 29, .vi_end_f2 = 29, ++ .nbr_end = 719, ++ ++ .burst_ena = false, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "720p@59.94Hz", ++ .clock = 148800, ++ .refresh = 59940, ++ .oversample = TV_OVERSAMPLE_2X, ++ .component_only = 1, ++ ++ .hsync_end = 80, .hblank_end = 300, ++ .hblank_start = 1580, .htotal = 1651, ++ ++ .progressive = true, .trilevel_sync = true, ++ ++ .vsync_start_f1 = 10, .vsync_start_f2 = 10, ++ .vsync_len = 10, ++ ++ .veq_ena = false, ++ ++ .vi_end_f1 = 29, .vi_end_f2 = 29, ++ .nbr_end = 719, ++ ++ .burst_ena = false, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "720p@50Hz", ++ .clock = 148800, ++ .refresh = 50000, ++ .oversample = TV_OVERSAMPLE_2X, ++ .component_only = 1, ++ ++ .hsync_end = 80, .hblank_end = 300, ++ .hblank_start = 1580, .htotal = 1979, ++ ++ .progressive = true, .trilevel_sync = true, ++ ++ .vsync_start_f1 = 10, .vsync_start_f2 = 10, ++ .vsync_len = 10, ++ ++ .veq_ena = false, ++ ++ .vi_end_f1 = 29, .vi_end_f2 = 29, ++ .nbr_end = 719, ++ ++ .burst_ena = false, ++ ++ .filter_table = filter_table, ++ .max_srcw = 800 ++ }, ++ { ++ .name = "1080i@50Hz", ++ .clock = 148800, ++ .refresh = 25000, ++ .oversample = TV_OVERSAMPLE_2X, ++ .component_only = 1, ++ ++ .hsync_end = 88, .hblank_end = 235, ++ .hblank_start = 2155, .htotal = 2639, ++ ++ .progressive = false, .trilevel_sync = true, ++ ++ .vsync_start_f1 = 4, .vsync_start_f2 = 5, ++ .vsync_len = 10, ++ ++ .veq_ena = true, .veq_start_f1 = 4, ++ .veq_start_f2 = 4, .veq_len = 10, ++ ++ ++ .vi_end_f1 = 21, .vi_end_f2 = 22, ++ .nbr_end = 539, ++ ++ .burst_ena = false, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "1080i@60Hz", ++ .clock = 148800, ++ .refresh = 30000, ++ .oversample = TV_OVERSAMPLE_2X, ++ .component_only = 1, ++ ++ .hsync_end = 88, .hblank_end = 235, ++ .hblank_start = 2155, .htotal = 2199, ++ ++ .progressive = false, 
.trilevel_sync = true, ++ ++ .vsync_start_f1 = 4, .vsync_start_f2 = 5, ++ .vsync_len = 10, ++ ++ .veq_ena = true, .veq_start_f1 = 4, ++ .veq_start_f2 = 4, .veq_len = 10, ++ ++ ++ .vi_end_f1 = 21, .vi_end_f2 = 22, ++ .nbr_end = 539, ++ ++ .burst_ena = false, ++ ++ .filter_table = filter_table, ++ }, ++ { ++ .name = "1080i@59.94Hz", ++ .clock = 148800, ++ .refresh = 29970, ++ .oversample = TV_OVERSAMPLE_2X, ++ .component_only = 1, ++ ++ .hsync_end = 88, .hblank_end = 235, ++ .hblank_start = 2155, .htotal = 2200, ++ ++ .progressive = false, .trilevel_sync = true, ++ ++ .vsync_start_f1 = 4, .vsync_start_f2 = 5, ++ .vsync_len = 10, ++ ++ .veq_ena = true, .veq_start_f1 = 4, ++ .veq_start_f2 = 4, .veq_len = 10, ++ ++ ++ .vi_end_f1 = 21, .vi_end_f2 = 22, ++ .nbr_end = 539, ++ ++ .burst_ena = false, ++ ++ .filter_table = filter_table, ++ }, ++}; ++ ++#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0]) ++ ++static void ++intel_tv_dpms(struct drm_encoder *encoder, int mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ switch(mode) { ++ case DRM_MODE_DPMS_ON: ++ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); ++ break; ++ case DRM_MODE_DPMS_STANDBY: ++ case DRM_MODE_DPMS_SUSPEND: ++ case DRM_MODE_DPMS_OFF: ++ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); ++ break; ++ } ++} ++ ++static void ++intel_tv_save(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_tv_priv *tv_priv = intel_output->dev_priv; ++ int i; ++ ++ tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); ++ tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2); ++ tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3); ++ tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1); ++ tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2); ++ tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3); 
++ tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4); ++ tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5); ++ tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6); ++ tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7); ++ tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1); ++ tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2); ++ tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3); ++ ++ tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y); ++ tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2); ++ tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U); ++ tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2); ++ tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V); ++ tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2); ++ tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS); ++ tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL); ++ tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS); ++ tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE); ++ tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1); ++ tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2); ++ tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3); ++ ++ for (i = 0; i < 60; i++) ++ tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2)); ++ for (i = 0; i < 60; i++) ++ tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2)); ++ for (i = 0; i < 43; i++) ++ tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2)); ++ for (i = 0; i < 43; i++) ++ tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2)); ++ ++ tv_priv->save_TV_DAC = I915_READ(TV_DAC); ++ tv_priv->save_TV_CTL = I915_READ(TV_CTL); ++} ++ ++static void ++intel_tv_restore(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_tv_priv *tv_priv = intel_output->dev_priv; ++ struct drm_crtc *crtc = connector->encoder->crtc; ++ struct intel_crtc *intel_crtc; ++ int i; ++ ++ /* FIXME: No CRTC? 
*/ ++ if (!crtc) ++ return; ++ ++ intel_crtc = to_intel_crtc(crtc); ++ I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1); ++ I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2); ++ I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3); ++ I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1); ++ I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2); ++ I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3); ++ I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4); ++ I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5); ++ I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6); ++ I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7); ++ I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1); ++ I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2); ++ I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3); ++ ++ I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y); ++ I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2); ++ I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U); ++ I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2); ++ I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V); ++ I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2); ++ I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS); ++ I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL); ++ ++ { ++ int pipeconf_reg = (intel_crtc->pipe == 0) ? ++ PIPEACONF : PIPEBCONF; ++ int dspcntr_reg = (intel_crtc->plane == 0) ? ++ DSPACNTR : DSPBCNTR; ++ int pipeconf = I915_READ(pipeconf_reg); ++ int dspcntr = I915_READ(dspcntr_reg); ++ int dspbase_reg = (intel_crtc->plane == 0) ? ++ DSPAADDR : DSPBADDR; ++ /* Pipe must be off here */ ++ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); ++ /* Flush the plane changes */ ++ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); ++ ++ if (!IS_I9XX(dev)) { ++ /* Wait for vblank for the disable to take effect */ ++ intel_wait_for_vblank(dev); ++ } ++ ++ I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); ++ /* Wait for vblank for the disable to take effect. 
*/ ++ intel_wait_for_vblank(dev); ++ ++ /* Filter ctl must be set before TV_WIN_SIZE */ ++ I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1); ++ I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2); ++ I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3); ++ I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS); ++ I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE); ++ I915_WRITE(pipeconf_reg, pipeconf); ++ I915_WRITE(dspcntr_reg, dspcntr); ++ /* Flush the plane changes */ ++ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); ++ } ++ ++ for (i = 0; i < 60; i++) ++ I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]); ++ for (i = 0; i < 60; i++) ++ I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]); ++ for (i = 0; i < 43; i++) ++ I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]); ++ for (i = 0; i < 43; i++) ++ I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]); ++ ++ I915_WRITE(TV_DAC, tv_priv->save_TV_DAC); ++ I915_WRITE(TV_CTL, tv_priv->save_TV_CTL); ++} ++ ++static const struct tv_mode * ++intel_tv_mode_lookup (char *tv_format) ++{ ++ int i; ++ ++ for (i = 0; i < sizeof(tv_modes) / sizeof (tv_modes[0]); i++) { ++ const struct tv_mode *tv_mode = &tv_modes[i]; ++ ++ if (!strcmp(tv_format, tv_mode->name)) ++ return tv_mode; ++ } ++ return NULL; ++} ++ ++static const struct tv_mode * ++intel_tv_mode_find (struct intel_output *intel_output) ++{ ++ struct intel_tv_priv *tv_priv = intel_output->dev_priv; ++ ++ return intel_tv_mode_lookup(tv_priv->tv_format); ++} ++ ++static enum drm_mode_status ++intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); ++ ++ /* Ensure TV refresh is close to desired refresh */ ++ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1) ++ return MODE_OK; ++ return MODE_CLOCK_RANGE; ++} ++ ++ 
++static bool ++intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_mode_config *drm_config = &dev->mode_config; ++ struct intel_output *intel_output = enc_to_intel_output(encoder); ++ const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); ++ struct drm_encoder *other_encoder; ++ ++ if (!tv_mode) ++ return false; ++ ++ /* FIXME: lock encoder list */ ++ list_for_each_entry(other_encoder, &drm_config->encoder_list, head) { ++ if (other_encoder != encoder && ++ other_encoder->crtc == encoder->crtc) ++ return false; ++ } ++ ++ adjusted_mode->clock = tv_mode->clock; ++ return true; ++} ++ ++static void ++intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_crtc *crtc = encoder->crtc; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ struct intel_output *intel_output = enc_to_intel_output(encoder); ++ struct intel_tv_priv *tv_priv = intel_output->dev_priv; ++ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); ++ u32 tv_ctl; ++ u32 hctl1, hctl2, hctl3; ++ u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; ++ u32 scctl1, scctl2, scctl3; ++ int i, j; ++ const struct video_levels *video_levels; ++ const struct color_conversion *color_conversion; ++ bool burst_ena; ++ ++ if (!tv_mode) ++ return; /* can't happen (mode_prepare prevents this) */ ++ ++ tv_ctl = 0; ++ ++ switch (tv_priv->type) { ++ default: ++ case DRM_MODE_CONNECTOR_Unknown: ++ case DRM_MODE_CONNECTOR_Composite: ++ tv_ctl |= TV_ENC_OUTPUT_COMPOSITE; ++ video_levels = tv_mode->composite_levels; ++ color_conversion = tv_mode->composite_color; ++ burst_ena = tv_mode->burst_ena; ++ break; ++ case DRM_MODE_CONNECTOR_Component: ++ tv_ctl |= TV_ENC_OUTPUT_COMPONENT; ++ 
video_levels = &component_levels; ++ if (tv_mode->burst_ena) ++ color_conversion = &sdtv_csc_yprpb; ++ else ++ color_conversion = &hdtv_csc_yprpb; ++ burst_ena = false; ++ break; ++ case DRM_MODE_CONNECTOR_SVIDEO: ++ tv_ctl |= TV_ENC_OUTPUT_SVIDEO; ++ video_levels = tv_mode->svideo_levels; ++ color_conversion = tv_mode->svideo_color; ++ burst_ena = tv_mode->burst_ena; ++ break; ++ } ++ hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) | ++ (tv_mode->htotal << TV_HTOTAL_SHIFT); ++ ++ hctl2 = (tv_mode->hburst_start << 16) | ++ (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT); ++ ++ if (burst_ena) ++ hctl2 |= TV_BURST_ENA; ++ ++ hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) | ++ (tv_mode->hblank_end << TV_HBLANK_END_SHIFT); ++ ++ vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) | ++ (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) | ++ (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT); ++ ++ vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) | ++ (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) | ++ (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT); ++ ++ vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) | ++ (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) | ++ (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT); ++ ++ if (tv_mode->veq_ena) ++ vctl3 |= TV_EQUAL_ENA; ++ ++ vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) | ++ (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT); ++ ++ vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) | ++ (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT); ++ ++ vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) | ++ (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT); ++ ++ vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) | ++ (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT); ++ ++ if (intel_crtc->pipe == 1) ++ tv_ctl |= TV_ENC_PIPEB_SELECT; ++ tv_ctl |= tv_mode->oversample; ++ ++ if (tv_mode->progressive) ++ tv_ctl |= TV_PROGRESSIVE; ++ if (tv_mode->trilevel_sync) ++ tv_ctl |= 
TV_TRILEVEL_SYNC; ++ if (tv_mode->pal_burst) ++ tv_ctl |= TV_PAL_BURST; ++ scctl1 = 0; ++ /* dda1 implies valid video levels */ ++ if (tv_mode->dda1_inc) { ++ scctl1 |= TV_SC_DDA1_EN; ++ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; ++ } ++ ++ if (tv_mode->dda2_inc) ++ scctl1 |= TV_SC_DDA2_EN; ++ ++ if (tv_mode->dda3_inc) ++ scctl1 |= TV_SC_DDA3_EN; ++ ++ scctl1 |= tv_mode->sc_reset; ++ scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; ++ ++ scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | ++ tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT; ++ ++ scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT | ++ tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; ++ ++ /* Enable two fixes for the chips that need them. */ ++ if (dev->pci_device < 0x2772) ++ tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; ++ ++ I915_WRITE(TV_H_CTL_1, hctl1); ++ I915_WRITE(TV_H_CTL_2, hctl2); ++ I915_WRITE(TV_H_CTL_3, hctl3); ++ I915_WRITE(TV_V_CTL_1, vctl1); ++ I915_WRITE(TV_V_CTL_2, vctl2); ++ I915_WRITE(TV_V_CTL_3, vctl3); ++ I915_WRITE(TV_V_CTL_4, vctl4); ++ I915_WRITE(TV_V_CTL_5, vctl5); ++ I915_WRITE(TV_V_CTL_6, vctl6); ++ I915_WRITE(TV_V_CTL_7, vctl7); ++ I915_WRITE(TV_SC_CTL_1, scctl1); ++ I915_WRITE(TV_SC_CTL_2, scctl2); ++ I915_WRITE(TV_SC_CTL_3, scctl3); ++ ++ if (color_conversion) { ++ I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) | ++ color_conversion->gy); ++ I915_WRITE(TV_CSC_Y2,(color_conversion->by << 16) | ++ color_conversion->ay); ++ I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) | ++ color_conversion->gu); ++ I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) | ++ color_conversion->au); ++ I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) | ++ color_conversion->gv); ++ I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) | ++ color_conversion->av); ++ } ++ ++ I915_WRITE(TV_CLR_KNOBS, 0x00606000); ++ if (video_levels) ++ I915_WRITE(TV_CLR_LEVEL, ++ ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | ++ (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); ++ { ++ int pipeconf_reg = 
(intel_crtc->pipe == 0) ? ++ PIPEACONF : PIPEBCONF; ++ int dspcntr_reg = (intel_crtc->plane == 0) ? ++ DSPACNTR : DSPBCNTR; ++ int pipeconf = I915_READ(pipeconf_reg); ++ int dspcntr = I915_READ(dspcntr_reg); ++ int dspbase_reg = (intel_crtc->plane == 0) ? ++ DSPAADDR : DSPBADDR; ++ int xpos = 0x0, ypos = 0x0; ++ unsigned int xsize, ysize; ++ /* Pipe must be off here */ ++ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); ++ /* Flush the plane changes */ ++ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); ++ ++ /* Wait for vblank for the disable to take effect */ ++ if (!IS_I9XX(dev)) ++ intel_wait_for_vblank(dev); ++ ++ I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); ++ /* Wait for vblank for the disable to take effect. */ ++ intel_wait_for_vblank(dev); ++ ++ /* Filter ctl must be set before TV_WIN_SIZE */ ++ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); ++ xsize = tv_mode->hblank_start - tv_mode->hblank_end; ++ if (tv_mode->progressive) ++ ysize = tv_mode->nbr_end + 1; ++ else ++ ysize = 2*tv_mode->nbr_end + 1; ++ ++ xpos += tv_priv->margin[TV_MARGIN_LEFT]; ++ ypos += tv_priv->margin[TV_MARGIN_TOP]; ++ xsize -= (tv_priv->margin[TV_MARGIN_LEFT] + ++ tv_priv->margin[TV_MARGIN_RIGHT]); ++ ysize -= (tv_priv->margin[TV_MARGIN_TOP] + ++ tv_priv->margin[TV_MARGIN_BOTTOM]); ++ I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos); ++ I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize); ++ ++ I915_WRITE(pipeconf_reg, pipeconf); ++ I915_WRITE(dspcntr_reg, dspcntr); ++ /* Flush the plane changes */ ++ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); ++ } ++ ++ j = 0; ++ for (i = 0; i < 60; i++) ++ I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); ++ for (i = 0; i < 60; i++) ++ I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); ++ for (i = 0; i < 43; i++) ++ I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); ++ for (i = 0; i < 43; i++) ++ I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); ++ I915_WRITE(TV_DAC, 0); ++ I915_WRITE(TV_CTL, 
tv_ctl); ++} ++ ++static const struct drm_display_mode reported_modes[] = { ++ { ++ .name = "NTSC 480i", ++ .clock = 107520, ++ .hdisplay = 1280, ++ .hsync_start = 1368, ++ .hsync_end = 1496, ++ .htotal = 1712, ++ ++ .vdisplay = 1024, ++ .vsync_start = 1027, ++ .vsync_end = 1034, ++ .vtotal = 1104, ++ .type = DRM_MODE_TYPE_DRIVER, ++ }, ++}; ++ ++/** ++ * Detects TV presence by checking for load. ++ * ++ * Requires that the current pipe's DPLL is active. ++ ++ * \return true if TV is connected. ++ * \return false if TV is disconnected. ++ */ ++static int ++intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) ++{ ++ struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_device *dev = encoder->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ unsigned long irqflags; ++ u32 tv_ctl, save_tv_ctl; ++ u32 tv_dac, save_tv_dac; ++ int type = DRM_MODE_CONNECTOR_Unknown; ++ ++ tv_dac = I915_READ(TV_DAC); ++ ++ /* Disable TV interrupts around load detect or we'll recurse */ ++ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); ++ i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | ++ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); ++ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); ++ ++ /* ++ * Detect TV by polling) ++ */ ++ if (intel_output->load_detect_temp) { ++ /* TV not currently running, prod it with destructive detect */ ++ save_tv_dac = tv_dac; ++ tv_ctl = I915_READ(TV_CTL); ++ save_tv_ctl = tv_ctl; ++ tv_ctl &= ~TV_ENC_ENABLE; ++ tv_ctl &= ~TV_TEST_MODE_MASK; ++ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; ++ tv_dac &= ~TVDAC_SENSE_MASK; ++ tv_dac |= (TVDAC_STATE_CHG_EN | ++ TVDAC_A_SENSE_CTL | ++ TVDAC_B_SENSE_CTL | ++ TVDAC_C_SENSE_CTL | ++ DAC_CTL_OVERRIDE | ++ DAC_A_0_7_V | ++ DAC_B_0_7_V | ++ DAC_C_0_7_V); ++ I915_WRITE(TV_CTL, tv_ctl); ++ I915_WRITE(TV_DAC, tv_dac); ++ intel_wait_for_vblank(dev); ++ tv_dac = I915_READ(TV_DAC); ++ I915_WRITE(TV_DAC, save_tv_dac); ++ I915_WRITE(TV_CTL, save_tv_ctl); ++ 
} ++ /* ++ * A B C ++ * 0 1 1 Composite ++ * 1 0 X svideo ++ * 0 0 0 Component ++ */ ++ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { ++ DRM_DEBUG("Detected Composite TV connection\n"); ++ type = DRM_MODE_CONNECTOR_Composite; ++ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { ++ DRM_DEBUG("Detected S-Video TV connection\n"); ++ type = DRM_MODE_CONNECTOR_SVIDEO; ++ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { ++ DRM_DEBUG("Detected Component TV connection\n"); ++ type = DRM_MODE_CONNECTOR_Component; ++ } else { ++ DRM_DEBUG("No TV connection detected\n"); ++ type = -1; ++ } ++ ++ /* Restore interrupt config */ ++ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); ++ i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | ++ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); ++ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); ++ ++ return type; ++} ++ ++/** ++ * Detect the TV connection. ++ * ++ * Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure ++ * we have a pipe programmed in order to probe the TV. 
++ */ ++static enum drm_connector_status ++intel_tv_detect(struct drm_connector *connector) ++{ ++ struct drm_crtc *crtc; ++ struct drm_display_mode mode; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_tv_priv *tv_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = &intel_output->enc; ++ int dpms_mode; ++ int type = tv_priv->type; ++ ++ mode = reported_modes[0]; ++ drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); ++ ++ if (encoder->crtc) { ++ type = intel_tv_detect_type(encoder->crtc, intel_output); ++ } else { ++ crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); ++ if (crtc) { ++ type = intel_tv_detect_type(crtc, intel_output); ++ intel_release_load_detect_pipe(intel_output, dpms_mode); ++ } else ++ type = -1; ++ } ++ ++ if (type < 0) ++ return connector_status_disconnected; ++ ++ return connector_status_connected; ++} ++ ++static struct input_res { ++ char *name; ++ int w, h; ++} input_res_table[] = ++{ ++ {"640x480", 640, 480}, ++ {"800x600", 800, 600}, ++ {"1024x768", 1024, 768}, ++ {"1280x1024", 1280, 1024}, ++ {"848x480", 848, 480}, ++ {"1280x720", 1280, 720}, ++ {"1920x1080", 1920, 1080}, ++}; ++ ++/** ++ * Stub get_modes function. ++ * ++ * This should probably return a set of fixed modes, unless we can figure out ++ * how to probe modes off of TV connections. 
++ */ ++ ++static int ++intel_tv_get_modes(struct drm_connector *connector) ++{ ++ struct drm_display_mode *mode_ptr; ++ struct intel_output *intel_output = to_intel_output(connector); ++ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); ++ int j; ++ ++ for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); ++ j++) { ++ struct input_res *input = &input_res_table[j]; ++ unsigned int hactive_s = input->w; ++ unsigned int vactive_s = input->h; ++ ++ if (tv_mode->max_srcw && input->w > tv_mode->max_srcw) ++ continue; ++ ++ if (input->w > 1024 && (!tv_mode->progressive ++ && !tv_mode->component_only)) ++ continue; ++ ++ mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode), ++ DRM_MEM_DRIVER); ++ strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); ++ ++ mode_ptr->hdisplay = hactive_s; ++ mode_ptr->hsync_start = hactive_s + 1; ++ mode_ptr->hsync_end = hactive_s + 64; ++ if (mode_ptr->hsync_end <= mode_ptr->hsync_start) ++ mode_ptr->hsync_end = mode_ptr->hsync_start + 1; ++ mode_ptr->htotal = hactive_s + 96; ++ ++ mode_ptr->vdisplay = vactive_s; ++ mode_ptr->vsync_start = vactive_s + 1; ++ mode_ptr->vsync_end = vactive_s + 32; ++ if (mode_ptr->vsync_end <= mode_ptr->vsync_start) ++ mode_ptr->vsync_end = mode_ptr->vsync_start + 1; ++ mode_ptr->vtotal = vactive_s + 33; ++ ++ mode_ptr->clock = (int) (tv_mode->refresh * ++ mode_ptr->vtotal * ++ mode_ptr->htotal / 1000) / 1000; ++ ++ mode_ptr->type = DRM_MODE_TYPE_DRIVER; ++ drm_mode_probed_add(connector, mode_ptr); ++ } ++ ++ return 0; ++} ++ ++static void ++intel_tv_destroy (struct drm_connector *connector) ++{ ++ struct intel_output *intel_output = to_intel_output(connector); ++ ++ drm_sysfs_connector_remove(connector); ++ drm_connector_cleanup(connector); ++ drm_free(intel_output, sizeof(struct intel_output) + sizeof(struct intel_tv_priv), ++ DRM_MEM_DRIVER); ++} ++ ++ ++static int ++intel_tv_set_property(struct drm_connector *connector, struct drm_property *property, ++ uint64_t 
val) ++{ ++ struct drm_device *dev = connector->dev; ++ struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_tv_priv *tv_priv = intel_output->dev_priv; ++ int ret = 0; ++ ++ ret = drm_connector_property_set_value(connector, property, val); ++ if (ret < 0) ++ goto out; ++ ++ if (property == dev->mode_config.tv_left_margin_property) ++ tv_priv->margin[TV_MARGIN_LEFT] = val; ++ else if (property == dev->mode_config.tv_right_margin_property) ++ tv_priv->margin[TV_MARGIN_RIGHT] = val; ++ else if (property == dev->mode_config.tv_top_margin_property) ++ tv_priv->margin[TV_MARGIN_TOP] = val; ++ else if (property == dev->mode_config.tv_bottom_margin_property) ++ tv_priv->margin[TV_MARGIN_BOTTOM] = val; ++ else if (property == dev->mode_config.tv_mode_property) { ++ if (val >= NUM_TV_MODES) { ++ ret = -EINVAL; ++ goto out; ++ } ++ tv_priv->tv_format = tv_modes[val].name; ++ intel_tv_mode_set(&intel_output->enc, NULL, NULL); ++ } else { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ intel_tv_mode_set(&intel_output->enc, NULL, NULL); ++out: ++ return ret; ++} ++ ++static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { ++ .dpms = intel_tv_dpms, ++ .mode_fixup = intel_tv_mode_fixup, ++ .prepare = intel_encoder_prepare, ++ .mode_set = intel_tv_mode_set, ++ .commit = intel_encoder_commit, ++}; ++ ++static const struct drm_connector_funcs intel_tv_connector_funcs = { ++ .save = intel_tv_save, ++ .restore = intel_tv_restore, ++ .detect = intel_tv_detect, ++ .destroy = intel_tv_destroy, ++ .set_property = intel_tv_set_property, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++}; ++ ++static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { ++ .mode_valid = intel_tv_mode_valid, ++ .get_modes = intel_tv_get_modes, ++ .best_encoder = intel_best_encoder, ++}; ++ ++void intel_tv_enc_destroy(struct drm_encoder *encoder) ++{ ++ drm_encoder_cleanup(encoder); ++} ++ ++static const struct drm_encoder_funcs 
intel_tv_enc_funcs = { ++ .destroy = intel_tv_enc_destroy, ++}; ++ ++ ++void ++intel_tv_init(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_connector *connector; ++ struct intel_output *intel_output; ++ struct intel_tv_priv *tv_priv; ++ u32 tv_dac_on, tv_dac_off, save_tv_dac; ++ char **tv_format_names; ++ int i, initial_mode = 0; ++ ++ if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) ++ return; ++ ++ /* Even if we have an encoder we may not have a connector */ ++ if (!dev_priv->int_tv_support) ++ return; ++ ++ /* ++ * Sanity check the TV output by checking to see if the ++ * DAC register holds a value ++ */ ++ save_tv_dac = I915_READ(TV_DAC); ++ ++ I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN); ++ tv_dac_on = I915_READ(TV_DAC); ++ ++ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); ++ tv_dac_off = I915_READ(TV_DAC); ++ ++ I915_WRITE(TV_DAC, save_tv_dac); ++ ++ /* ++ * If the register does not hold the state change enable ++ * bit, (either as a 0 or a 1), assume it doesn't really ++ * exist ++ */ ++ if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 || ++ (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) ++ return; ++ ++ intel_output = drm_calloc(1, sizeof(struct intel_output) + ++ sizeof(struct intel_tv_priv), DRM_MEM_DRIVER); ++ if (!intel_output) { ++ return; ++ } ++ connector = &intel_output->base; ++ ++ drm_connector_init(dev, connector, &intel_tv_connector_funcs, ++ DRM_MODE_CONNECTOR_SVIDEO); ++ ++ drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, ++ DRM_MODE_ENCODER_TVDAC); ++ ++ drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); ++ tv_priv = (struct intel_tv_priv *)(intel_output + 1); ++ intel_output->type = INTEL_OUTPUT_TVOUT; ++ intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); ++ intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); ++ intel_output->dev_priv = tv_priv; ++ tv_priv->type = DRM_MODE_CONNECTOR_Unknown; ++ ++ /* BIOS 
margin values */ ++ tv_priv->margin[TV_MARGIN_LEFT] = 54; ++ tv_priv->margin[TV_MARGIN_TOP] = 36; ++ tv_priv->margin[TV_MARGIN_RIGHT] = 46; ++ tv_priv->margin[TV_MARGIN_BOTTOM] = 37; ++ ++ tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); ++ ++ drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); ++ drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); ++ connector->interlace_allowed = false; ++ connector->doublescan_allowed = false; ++ ++ /* Create TV properties then attach current values */ ++ tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES, ++ DRM_MEM_DRIVER); ++ if (!tv_format_names) ++ goto out; ++ for (i = 0; i < NUM_TV_MODES; i++) ++ tv_format_names[i] = tv_modes[i].name; ++ drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names); ++ ++ drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, ++ initial_mode); ++ drm_connector_attach_property(connector, ++ dev->mode_config.tv_left_margin_property, ++ tv_priv->margin[TV_MARGIN_LEFT]); ++ drm_connector_attach_property(connector, ++ dev->mode_config.tv_top_margin_property, ++ tv_priv->margin[TV_MARGIN_TOP]); ++ drm_connector_attach_property(connector, ++ dev->mode_config.tv_right_margin_property, ++ tv_priv->margin[TV_MARGIN_RIGHT]); ++ drm_connector_attach_property(connector, ++ dev->mode_config.tv_bottom_margin_property, ++ tv_priv->margin[TV_MARGIN_BOTTOM]); ++out: ++ drm_sysfs_connector_add(connector); ++} +diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c +index 4b27d9a..cace396 100644 +--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c ++++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c +@@ -860,12 +860,12 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) + * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must + * be careful about how this function is called. 
+ */ +-static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf) ++static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf) + { +- drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_buf_priv_t *buf_priv = buf->dev_private; ++ struct drm_radeon_master_private *master_priv = master->driver_priv; + +- buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; ++ buf_priv->age = ++master_priv->sarea_priv->last_dispatch; + buf->pending = 1; + buf->used = 0; + } +@@ -1027,6 +1027,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, + drm_radeon_kcmd_buffer_t *cmdbuf) + { + drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf = NULL; + int emit_dispatch_age = 0; +@@ -1134,7 +1135,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, + } + + emit_dispatch_age = 1; +- r300_discard_buffer(dev, buf); ++ r300_discard_buffer(dev, file_priv->master, buf); + break; + + case R300_CMD_WAIT: +@@ -1189,7 +1190,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, + + /* Emit the vertex buffer age */ + BEGIN_RING(2); +- RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch); ++ RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch); + ADVANCE_RING(); + } + +diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c +index dcebb4b..63212d7 100644 +--- a/drivers/gpu/drm/radeon/radeon_cp.c ++++ b/drivers/gpu/drm/radeon/radeon_cp.c +@@ -31,6 +31,7 @@ + + #include "drmP.h" + #include "drm.h" ++#include "drm_sarea.h" + #include "radeon_drm.h" + #include "radeon_drv.h" + #include "r300_reg.h" +@@ -667,15 +668,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, + RADEON_WRITE(RADEON_BUS_CNTL, tmp); + } /* PCIE cards appears to not need this */ + +- dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; +- 
RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); ++ dev_priv->scratch[0] = 0; ++ RADEON_WRITE(RADEON_LAST_FRAME_REG, 0); + +- dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; +- RADEON_WRITE(RADEON_LAST_DISPATCH_REG, +- dev_priv->sarea_priv->last_dispatch); ++ dev_priv->scratch[1] = 0; ++ RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0); + +- dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; +- RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear); ++ dev_priv->scratch[2] = 0; ++ RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); + + radeon_do_wait_for_idle(dev_priv); + +@@ -871,9 +871,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) + } + } + +-static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) ++static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, ++ struct drm_file *file_priv) + { + drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; + + DRM_DEBUG("\n"); + +@@ -998,8 +1000,8 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) + dev_priv->buffers_offset = init->buffers_offset; + dev_priv->gart_textures_offset = init->gart_textures_offset; + +- dev_priv->sarea = drm_getsarea(dev); +- if (!dev_priv->sarea) { ++ master_priv->sarea = drm_getsarea(dev); ++ if (!master_priv->sarea) { + DRM_ERROR("could not find sarea!\n"); + radeon_do_cleanup_cp(dev); + return -EINVAL; +@@ -1035,10 +1037,6 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) + } + } + +- dev_priv->sarea_priv = +- (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle + +- init->sarea_priv_offset); +- + #if __OS_HAS_AGP + if (dev_priv->flags & RADEON_IS_AGP) { + drm_core_ioremap(dev_priv->cp_ring, dev); +@@ -1329,7 +1327,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri + case RADEON_INIT_CP: + case 
RADEON_INIT_R200_CP: + case RADEON_INIT_R300_CP: +- return radeon_do_init_cp(dev, init); ++ return radeon_do_init_cp(dev, init, file_priv); + case RADEON_CLEANUP_CP: + return radeon_do_cleanup_cp(dev); + } +@@ -1768,6 +1766,51 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) + return ret; + } + ++int radeon_master_create(struct drm_device *dev, struct drm_master *master) ++{ ++ struct drm_radeon_master_private *master_priv; ++ unsigned long sareapage; ++ int ret; ++ ++ master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); ++ if (!master_priv) ++ return -ENOMEM; ++ ++ /* prebuild the SAREA */ ++ sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); ++ ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER, ++ &master_priv->sarea); ++ if (ret) { ++ DRM_ERROR("SAREA setup failed\n"); ++ return ret; ++ } ++ master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); ++ master_priv->sarea_priv->pfCurrentPage = 0; ++ ++ master->driver_priv = master_priv; ++ return 0; ++} ++ ++void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) ++{ ++ struct drm_radeon_master_private *master_priv = master->driver_priv; ++ ++ if (!master_priv) ++ return; ++ ++ if (master_priv->sarea_priv && ++ master_priv->sarea_priv->pfCurrentPage != 0) ++ radeon_cp_dispatch_flip(dev, master); ++ ++ master_priv->sarea_priv = NULL; ++ if (master_priv->sarea) ++ drm_rmmap_locked(dev, master_priv->sarea); ++ ++ drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); ++ ++ master->driver_priv = NULL; ++} ++ + /* Create mappings for registers and framebuffer so userland doesn't necessarily + * have to find them. 
+ */ +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c +index 71af746..fef2078 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.c ++++ b/drivers/gpu/drm/radeon/radeon_drv.c +@@ -96,6 +96,8 @@ static struct drm_driver driver = { + .enable_vblank = radeon_enable_vblank, + .disable_vblank = radeon_disable_vblank, + .dri_library_name = dri_library_name, ++ .master_create = radeon_master_create, ++ .master_destroy = radeon_master_destroy, + .irq_preinstall = radeon_driver_irq_preinstall, + .irq_postinstall = radeon_driver_irq_postinstall, + .irq_uninstall = radeon_driver_irq_uninstall, +diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h +index 3bbb871..490bc7c 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.h ++++ b/drivers/gpu/drm/radeon/radeon_drv.h +@@ -226,9 +226,13 @@ struct radeon_virt_surface { + #define RADEON_FLUSH_EMITED (1 < 0) + #define RADEON_PURGE_EMITED (1 < 1) + ++struct drm_radeon_master_private { ++ drm_local_map_t *sarea; ++ drm_radeon_sarea_t *sarea_priv; ++}; ++ + typedef struct drm_radeon_private { + drm_radeon_ring_buffer_t ring; +- drm_radeon_sarea_t *sarea_priv; + + u32 fb_location; + u32 fb_size; +@@ -409,6 +413,9 @@ extern int radeon_driver_open(struct drm_device *dev, + extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + ++extern int radeon_master_create(struct drm_device *dev, struct drm_master *master); ++extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master); ++extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master); + /* r300_cmdbuf.c */ + extern void r300_init_reg_flags(struct drm_device *dev); + +@@ -1335,8 +1342,9 @@ do { \ + } while (0) + + #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ +-do { \ +- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ ++do { \ ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \ ++ 
drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \ + if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ + int __ret = radeon_do_cp_idle( dev_priv ); \ + if ( __ret ) return __ret; \ +diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c +index 5d7153f..ef940a0 100644 +--- a/drivers/gpu/drm/radeon/radeon_state.c ++++ b/drivers/gpu/drm/radeon/radeon_state.c +@@ -742,13 +742,14 @@ static struct { + */ + + static void radeon_clear_box(drm_radeon_private_t * dev_priv, ++ struct drm_radeon_master_private *master_priv, + int x, int y, int w, int h, int r, int g, int b) + { + u32 color; + RING_LOCALS; + +- x += dev_priv->sarea_priv->boxes[0].x1; +- y += dev_priv->sarea_priv->boxes[0].y1; ++ x += master_priv->sarea_priv->boxes[0].x1; ++ y += master_priv->sarea_priv->boxes[0].y1; + + switch (dev_priv->color_fmt) { + case RADEON_COLOR_FORMAT_RGB565: +@@ -776,7 +777,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv, + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS); + +- if (dev_priv->sarea_priv->pfCurrentPage == 1) { ++ if (master_priv->sarea_priv->pfCurrentPage == 1) { + OUT_RING(dev_priv->front_pitch_offset); + } else { + OUT_RING(dev_priv->back_pitch_offset); +@@ -790,7 +791,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv, + ADVANCE_RING(); + } + +-static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) ++static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv) + { + /* Collapse various things into a wait flag -- trying to + * guess if userspase slept -- better just to have them tell us. 
+@@ -807,12 +808,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) + /* Purple box for page flipping + */ + if (dev_priv->stats.boxes & RADEON_BOX_FLIP) +- radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255); ++ radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255); + + /* Red box if we have to wait for idle at any point + */ + if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE) +- radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0); ++ radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0); + + /* Blue box: lost context? + */ +@@ -820,12 +821,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) + /* Yellow box for texture swaps + */ + if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD) +- radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0); ++ radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0); + + /* Green box if hardware never idles (as far as we can tell) + */ + if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) +- radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); ++ radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0); + + /* Draw bars indicating number of buffers allocated + * (not a great measure, easily confused) +@@ -834,7 +835,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) + if (dev_priv->stats.requested_bufs > 100) + dev_priv->stats.requested_bufs = 100; + +- radeon_clear_box(dev_priv, 4, 16, ++ radeon_clear_box(dev_priv, master_priv, 4, 16, + dev_priv->stats.requested_bufs, 4, + 196, 128, 128); + } +@@ -848,11 +849,13 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) + */ + + static void radeon_cp_dispatch_clear(struct drm_device * dev, ++ struct drm_master *master, + drm_radeon_clear_t * clear, + drm_radeon_clear_rect_t * depth_boxes) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_radeon_master_private *master_priv 
= master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; + drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; + int nbox = sarea_priv->nbox; + struct drm_clip_rect *pbox = sarea_priv->boxes; +@@ -864,7 +867,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + + dev_priv->stats.clears++; + +- if (dev_priv->sarea_priv->pfCurrentPage == 1) { ++ if (sarea_priv->pfCurrentPage == 1) { + unsigned int tmp = flags; + + flags &= ~(RADEON_FRONT | RADEON_BACK); +@@ -890,7 +893,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + + /* Make sure we restore the 3D state next time. + */ +- dev_priv->sarea_priv->ctx_owner = 0; ++ sarea_priv->ctx_owner = 0; + + for (i = 0; i < nbox; i++) { + int x = pbox[i].x1; +@@ -967,7 +970,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + /* Make sure we restore the 3D state next time. + * we haven't touched any "normal" state - still need this? + */ +- dev_priv->sarea_priv->ctx_owner = 0; ++ sarea_priv->ctx_owner = 0; + + if ((dev_priv->flags & RADEON_HAS_HIERZ) + && (flags & RADEON_USE_HIERZ)) { +@@ -1214,7 +1217,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + + /* Make sure we restore the 3D state next time. + */ +- dev_priv->sarea_priv->ctx_owner = 0; ++ sarea_priv->ctx_owner = 0; + + for (i = 0; i < nbox; i++) { + +@@ -1285,7 +1288,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + + /* Make sure we restore the 3D state next time. + */ +- dev_priv->sarea_priv->ctx_owner = 0; ++ sarea_priv->ctx_owner = 0; + + for (i = 0; i < nbox; i++) { + +@@ -1328,20 +1331,21 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + * wait on this value before performing the clear ioctl. We + * need this because the card's so damned fast... 
+ */ +- dev_priv->sarea_priv->last_clear++; ++ sarea_priv->last_clear++; + + BEGIN_RING(4); + +- RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear); ++ RADEON_CLEAR_AGE(sarea_priv->last_clear); + RADEON_WAIT_UNTIL_IDLE(); + + ADVANCE_RING(); + } + +-static void radeon_cp_dispatch_swap(struct drm_device * dev) ++static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_radeon_master_private *master_priv = master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; + int nbox = sarea_priv->nbox; + struct drm_clip_rect *pbox = sarea_priv->boxes; + int i; +@@ -1351,7 +1355,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev) + /* Do some trivial performance monitoring... + */ + if (dev_priv->do_boxes) +- radeon_cp_performance_boxes(dev_priv); ++ radeon_cp_performance_boxes(dev_priv, master_priv); + + /* Wait for the 3D stream to idle before dispatching the bitblt. + * This will prevent data corruption between the two streams. +@@ -1385,7 +1389,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev) + /* Make this work even if front & back are flipped: + */ + OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); +- if (dev_priv->sarea_priv->pfCurrentPage == 0) { ++ if (sarea_priv->pfCurrentPage == 0) { + OUT_RING(dev_priv->back_pitch_offset); + OUT_RING(dev_priv->front_pitch_offset); + } else { +@@ -1405,31 +1409,32 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev) + * throttle the framerate by waiting for this value before + * performing the swapbuffer ioctl. 
+ */ +- dev_priv->sarea_priv->last_frame++; ++ sarea_priv->last_frame++; + + BEGIN_RING(4); + +- RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); ++ RADEON_FRAME_AGE(sarea_priv->last_frame); + RADEON_WAIT_UNTIL_2D_IDLE(); + + ADVANCE_RING(); + } + +-static void radeon_cp_dispatch_flip(struct drm_device * dev) ++void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle; +- int offset = (dev_priv->sarea_priv->pfCurrentPage == 1) ++ struct drm_radeon_master_private *master_priv = master->driver_priv; ++ struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle; ++ int offset = (master_priv->sarea_priv->pfCurrentPage == 1) + ? dev_priv->front_offset : dev_priv->back_offset; + RING_LOCALS; + DRM_DEBUG("pfCurrentPage=%d\n", +- dev_priv->sarea_priv->pfCurrentPage); ++ master_priv->sarea_priv->pfCurrentPage); + + /* Do some trivial performance monitoring... + */ + if (dev_priv->do_boxes) { + dev_priv->stats.boxes |= RADEON_BOX_FLIP; +- radeon_cp_performance_boxes(dev_priv); ++ radeon_cp_performance_boxes(dev_priv, master_priv); + } + + /* Update the frame offsets for both CRTCs +@@ -1441,7 +1446,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev) + ((sarea->frame.y * dev_priv->front_pitch + + sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) + + offset); +- OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base ++ OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base + + offset); + + ADVANCE_RING(); +@@ -1450,13 +1455,13 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev) + * throttle the framerate by waiting for this value before + * performing the swapbuffer ioctl. 
+ */ +- dev_priv->sarea_priv->last_frame++; +- dev_priv->sarea_priv->pfCurrentPage = +- 1 - dev_priv->sarea_priv->pfCurrentPage; ++ master_priv->sarea_priv->last_frame++; ++ master_priv->sarea_priv->pfCurrentPage = ++ 1 - master_priv->sarea_priv->pfCurrentPage; + + BEGIN_RING(2); + +- RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); ++ RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame); + + ADVANCE_RING(); + } +@@ -1494,11 +1499,13 @@ typedef struct { + } drm_radeon_tcl_prim_t; + + static void radeon_cp_dispatch_vertex(struct drm_device * dev, ++ struct drm_file *file_priv, + struct drm_buf * buf, + drm_radeon_tcl_prim_t * prim) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; + int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start; + int numverts = (int)prim->numverts; + int nbox = sarea_priv->nbox; +@@ -1539,13 +1546,14 @@ static void radeon_cp_dispatch_vertex(struct drm_device * dev, + } while (i < nbox); + } + +-static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf) ++static void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf) + { + drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_master_private *master_priv = master->driver_priv; + drm_radeon_buf_priv_t *buf_priv = buf->dev_private; + RING_LOCALS; + +- buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; ++ buf_priv->age = ++master_priv->sarea_priv->last_dispatch; + + /* Emit the vertex buffer age */ + BEGIN_RING(2); +@@ -1590,12 +1598,14 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev, + } + } + +-static void radeon_cp_dispatch_indices(struct drm_device * dev, ++static void radeon_cp_dispatch_indices(struct drm_device *dev, ++ struct drm_master *master, + 
struct drm_buf * elt_buf, + drm_radeon_tcl_prim_t * prim) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_radeon_master_private *master_priv = master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; + int offset = dev_priv->gart_buffers_offset + prim->offset; + u32 *data; + int dwords; +@@ -1870,7 +1880,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, + ADVANCE_RING(); + COMMIT_RING(); + +- radeon_cp_discard_buffer(dev, buf); ++ radeon_cp_discard_buffer(dev, file_priv->master, buf); + + /* Update the input parameters for next time */ + image->y += height; +@@ -2110,7 +2120,8 @@ static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_fi + static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; + drm_radeon_clear_t *clear = data; + drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; + DRM_DEBUG("\n"); +@@ -2126,7 +2137,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * + sarea_priv->nbox * sizeof(depth_boxes[0]))) + return -EFAULT; + +- radeon_cp_dispatch_clear(dev, clear, depth_boxes); ++ radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes); + + COMMIT_RING(); + return 0; +@@ -2134,9 +2145,10 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * + + /* Not sure why this isn't set all the time: + */ +-static int radeon_do_init_pageflip(struct drm_device * dev) ++static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master) + { + drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_master_private 
*master_priv = master->driver_priv; + RING_LOCALS; + + DRM_DEBUG("\n"); +@@ -2153,8 +2165,8 @@ static int radeon_do_init_pageflip(struct drm_device * dev) + + dev_priv->page_flipping = 1; + +- if (dev_priv->sarea_priv->pfCurrentPage != 1) +- dev_priv->sarea_priv->pfCurrentPage = 0; ++ if (master_priv->sarea_priv->pfCurrentPage != 1) ++ master_priv->sarea_priv->pfCurrentPage = 0; + + return 0; + } +@@ -2172,9 +2184,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f + RING_SPACE_TEST_WITH_RETURN(dev_priv); + + if (!dev_priv->page_flipping) +- radeon_do_init_pageflip(dev); ++ radeon_do_init_pageflip(dev, file_priv->master); + +- radeon_cp_dispatch_flip(dev); ++ radeon_cp_dispatch_flip(dev, file_priv->master); + + COMMIT_RING(); + return 0; +@@ -2183,7 +2195,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f + static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; ++ + DRM_DEBUG("\n"); + + LOCK_TEST_WITH_RETURN(dev, file_priv); +@@ -2193,8 +2207,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + +- radeon_cp_dispatch_swap(dev); +- dev_priv->sarea_priv->ctx_owner = 0; ++ radeon_cp_dispatch_swap(dev, file_priv->master); ++ sarea_priv->ctx_owner = 0; + + COMMIT_RING(); + return 0; +@@ -2203,7 +2217,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f + static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- drm_radeon_sarea_t *sarea_priv = 
dev_priv->sarea_priv; ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; + drm_radeon_vertex_t *vertex = data; +@@ -2211,6 +2226,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file + + LOCK_TEST_WITH_RETURN(dev, file_priv); + ++ sarea_priv = master_priv->sarea_priv; ++ + DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", + DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); + +@@ -2263,13 +2280,13 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file + prim.finish = vertex->count; /* unused */ + prim.prim = vertex->prim; + prim.numverts = vertex->count; +- prim.vc_format = dev_priv->sarea_priv->vc_format; ++ prim.vc_format = sarea_priv->vc_format; + +- radeon_cp_dispatch_vertex(dev, buf, &prim); ++ radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim); + } + + if (vertex->discard) { +- radeon_cp_discard_buffer(dev, buf); ++ radeon_cp_discard_buffer(dev, file_priv->master, buf); + } + + COMMIT_RING(); +@@ -2279,7 +2296,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file + static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; + drm_radeon_indices_t *elts = data; +@@ -2288,6 +2306,8 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file + + LOCK_TEST_WITH_RETURN(dev, file_priv); + ++ sarea_priv = master_priv->sarea_priv; ++ + DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", + DRM_CURRENTPID, elts->idx, elts->start, elts->end, + elts->discard); +@@ -2353,11 +2373,11 @@ 
static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file + prim.prim = elts->prim; + prim.offset = 0; /* offset from start of dma buffers */ + prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ +- prim.vc_format = dev_priv->sarea_priv->vc_format; ++ prim.vc_format = sarea_priv->vc_format; + +- radeon_cp_dispatch_indices(dev, buf, &prim); ++ radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim); + if (elts->discard) { +- radeon_cp_discard_buffer(dev, buf); ++ radeon_cp_discard_buffer(dev, file_priv->master, buf); + } + + COMMIT_RING(); +@@ -2468,7 +2488,7 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil + */ + radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); + if (indirect->discard) { +- radeon_cp_discard_buffer(dev, buf); ++ radeon_cp_discard_buffer(dev, file_priv->master, buf); + } + + COMMIT_RING(); +@@ -2478,7 +2498,8 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil + static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; ++ drm_radeon_sarea_t *sarea_priv; + struct drm_device_dma *dma = dev->dma; + struct drm_buf *buf; + drm_radeon_vertex2_t *vertex = data; +@@ -2487,6 +2508,8 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file + + LOCK_TEST_WITH_RETURN(dev, file_priv); + ++ sarea_priv = master_priv->sarea_priv; ++ + DRM_DEBUG("pid=%d index=%d discard=%d\n", + DRM_CURRENTPID, vertex->idx, vertex->discard); + +@@ -2547,12 +2570,12 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file + tclprim.offset = prim.numverts * 64; + tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ + +- radeon_cp_dispatch_indices(dev, buf, &tclprim); ++ 
radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim); + } else { + tclprim.numverts = prim.numverts; + tclprim.offset = 0; /* not used */ + +- radeon_cp_dispatch_vertex(dev, buf, &tclprim); ++ radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim); + } + + if (sarea_priv->nbox == 1) +@@ -2560,7 +2583,7 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file + } + + if (vertex->discard) { +- radeon_cp_discard_buffer(dev, buf); ++ radeon_cp_discard_buffer(dev, file_priv->master, buf); + } + + COMMIT_RING(); +@@ -2909,7 +2932,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file + goto err; + } + +- radeon_cp_discard_buffer(dev, buf); ++ radeon_cp_discard_buffer(dev, file_priv->master, buf); + break; + + case RADEON_CMD_PACKET3: +@@ -3020,7 +3043,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil + */ + case RADEON_PARAM_SAREA_HANDLE: + /* The lock is the first dword in the sarea. */ +- value = (long)dev->lock.hw_lock; ++ /* no users of this parameter */ + break; + #endif + case RADEON_PARAM_GART_TEX_HANDLE: +@@ -3064,6 +3087,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil + static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) + { + drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; + drm_radeon_setparam_t *sp = data; + struct drm_radeon_driver_file_fields *radeon_priv; + +@@ -3078,12 +3102,14 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil + DRM_DEBUG("color tiling disabled\n"); + dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; + dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; +- dev_priv->sarea_priv->tiling_enabled = 0; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->tiling_enabled = 0; + } else if (sp->value == 1) { + DRM_DEBUG("color 
tiling enabled\n"); + dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; + dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; +- dev_priv->sarea_priv->tiling_enabled = 1; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->tiling_enabled = 1; + } + break; + case RADEON_SETPARAM_PCIGART_LOCATION: +@@ -3129,14 +3155,6 @@ void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) + + void radeon_driver_lastclose(struct drm_device *dev) + { +- if (dev->dev_private) { +- drm_radeon_private_t *dev_priv = dev->dev_private; +- +- if (dev_priv->sarea_priv && +- dev_priv->sarea_priv->pfCurrentPage != 0) +- radeon_cp_dispatch_flip(dev); +- } +- + radeon_do_release(dev); + } + +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c +index 448d209..e621072 100644 +--- a/drivers/video/console/vgacon.c ++++ b/drivers/video/console/vgacon.c +@@ -112,6 +112,23 @@ static int vga_video_font_height; + static int vga_scan_lines __read_mostly; + static unsigned int vga_rolled_over; + ++int vgacon_text_mode_force = 0; ++ ++bool vgacon_text_force(void) ++{ ++ return vgacon_text_mode_force ? 
true : false; ++} ++EXPORT_SYMBOL(vgacon_text_force); ++ ++static int __init text_mode(char *str) ++{ ++ vgacon_text_mode_force = 1; ++ return 1; ++} ++ ++/* force text mode - used by kernel modesetting */ ++__setup("nomodeset", text_mode); ++ + static int __init no_scroll(char *str) + { + /* +diff --git a/include/drm/Kbuild b/include/drm/Kbuild +index 82b6983..b940fdf 100644 +--- a/include/drm/Kbuild ++++ b/include/drm/Kbuild +@@ -1,4 +1,4 @@ +-unifdef-y += drm.h drm_sarea.h ++unifdef-y += drm.h drm_sarea.h drm_mode.h + unifdef-y += i810_drm.h + unifdef-y += i830_drm.h + unifdef-y += i915_drm.h +diff --git a/include/drm/drm.h b/include/drm/drm.h +index f46ba4b..32e5096 100644 +--- a/include/drm/drm.h ++++ b/include/drm/drm.h +@@ -173,6 +173,7 @@ enum drm_map_type { + _DRM_AGP = 3, /**< AGP/GART */ + _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ + _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ ++ _DRM_GEM = 6, /**< GEM object */ + }; + + /** +@@ -598,6 +599,8 @@ struct drm_gem_open { + uint64_t size; + }; + ++#include "drm_mode.h" ++ + #define DRM_IOCTL_BASE 'd' + #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) + #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) +@@ -634,6 +637,9 @@ struct drm_gem_open { + #define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) + #define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) + ++#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) ++#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) ++ + #define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) + #define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) + #define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) +@@ -664,6 +670,24 @@ struct drm_gem_open { + + #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) + ++#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) ++#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc) ++#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct 
drm_mode_crtc) ++#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor) ++#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut) ++#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut) ++#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder) ++#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector) ++#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) ++#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) ++ ++#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property) ++#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property) ++#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob) ++#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) ++#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) ++#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) ++ + /** + * Device specific ioctls should only be in their respective headers + * The device specific ioctl range is from 0x40 to 0x99. +diff --git a/include/drm/drmP.h b/include/drm/drmP.h +index d5e8e5c..7802c80 100644 +--- a/include/drm/drmP.h ++++ b/include/drm/drmP.h +@@ -105,6 +105,7 @@ struct drm_device; + #define DRIVER_FB_DMA 0x400 + #define DRIVER_IRQ_VBL2 0x800 + #define DRIVER_GEM 0x1000 ++#define DRIVER_MODESET 0x2000 + + /***********************************************************************/ + /** \name Begin the DRM... 
*/ +@@ -238,11 +239,11 @@ struct drm_device; + */ + #define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ + do { \ +- if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ +- dev->lock.file_priv != file_priv ) { \ ++ if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock) || \ ++ file_priv->master->lock.file_priv != file_priv) { \ + DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ +- __func__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ +- dev->lock.file_priv, file_priv ); \ ++ __func__, _DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock),\ ++ file_priv->master->lock.file_priv, file_priv); \ + return -EINVAL; \ + } \ + } while (0) +@@ -276,6 +277,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, + #define DRM_AUTH 0x1 + #define DRM_MASTER 0x2 + #define DRM_ROOT_ONLY 0x4 ++#define DRM_CONTROL_ALLOW 0x8 + + struct drm_ioctl_desc { + unsigned int cmd; +@@ -379,21 +381,26 @@ struct drm_buf_entry { + /** File private data */ + struct drm_file { + int authenticated; +- int master; + pid_t pid; + uid_t uid; + drm_magic_t magic; + unsigned long ioctl_count; + struct list_head lhead; + struct drm_minor *minor; +- int remove_auth_on_close; + unsigned long lock_count; ++ + /** Mapping of mm object handles to object pointers. */ + struct idr object_idr; + /** Lock for synchronization of access to object_idr. */ + spinlock_t table_lock; ++ + struct file *filp; + void *driver_priv; ++ ++ int is_master; /* this file private is a master for a minor */ ++ struct drm_master *master; /* master this node is currently associated with ++ N.B. 
not always minor->master */ ++ struct list_head fbs; + }; + + /** Wait queue */ +@@ -523,6 +530,8 @@ struct drm_map_list { + struct drm_hash_item hash; + struct drm_map *map; /**< mapping */ + uint64_t user_token; ++ struct drm_master *master; ++ struct drm_mm_node *file_offset_node; /**< fake offset */ + }; + + typedef struct drm_map drm_local_map_t; +@@ -563,6 +572,14 @@ struct drm_ati_pcigart_info { + }; + + /** ++ * GEM specific mm private for tracking GEM objects ++ */ ++struct drm_gem_mm { ++ struct drm_mm offset_manager; /**< Offset mgmt for buffer objects */ ++ struct drm_open_hash offset_hash; /**< User token hash table for maps */ ++}; ++ ++/** + * This structure defines the drm_mm memory object, which will be used by the + * DRM for its buffer objects. + */ +@@ -579,6 +596,9 @@ struct drm_gem_object { + /** File representing the shmem storage */ + struct file *filp; + ++ /* Mapping info for this object */ ++ struct drm_map_list map_list; ++ + /** + * Size of the object, in bytes. Immutable over the object's + * lifetime. +@@ -612,6 +632,33 @@ struct drm_gem_object { + void *driver_private; + }; + ++#include "drm_crtc.h" ++ ++/* per-master structure */ ++struct drm_master { ++ ++ struct kref refcount; /* refcount for this master */ ++ ++ struct list_head head; /**< each minor contains a list of masters */ ++ struct drm_minor *minor; /**< link back to minor we are a master for */ ++ ++ char *unique; /**< Unique identifier: e.g., busid */ ++ int unique_len; /**< Length of unique field */ ++ int unique_size; /**< amount allocated */ ++ ++ int blocked; /**< Blocked due to VC switch? */ ++ ++ /** \name Authentication */ ++ /*@{ */ ++ struct drm_open_hash magiclist; ++ struct list_head magicfree; ++ /*@} */ ++ ++ struct drm_lock_data lock; /**< Information on hardware lock */ ++ ++ void *driver_priv; /**< Private structure for driver to use */ ++}; ++ + /** + * DRM driver structure. This structure represent the common code for + * a family of cards. 
There will one drm_device for each card present +@@ -712,6 +759,10 @@ struct drm_driver { + void (*set_version) (struct drm_device *dev, + struct drm_set_version *sv); + ++ /* Master routines */ ++ int (*master_create)(struct drm_device *dev, struct drm_master *master); ++ void (*master_destroy)(struct drm_device *dev, struct drm_master *master); ++ + int (*proc_init)(struct drm_minor *minor); + void (*proc_cleanup)(struct drm_minor *minor); + +@@ -724,6 +775,9 @@ struct drm_driver { + int (*gem_init_object) (struct drm_gem_object *obj); + void (*gem_free_object) (struct drm_gem_object *obj); + ++ /* Driver private ops for this object */ ++ struct vm_operations_struct *gem_vm_ops; ++ + int major; + int minor; + int patchlevel; +@@ -737,10 +791,14 @@ struct drm_driver { + int num_ioctls; + struct file_operations fops; + struct pci_driver pci_driver; ++ /* List of devices hanging off this driver */ ++ struct list_head device_list; + }; + + #define DRM_MINOR_UNASSIGNED 0 + #define DRM_MINOR_LEGACY 1 ++#define DRM_MINOR_CONTROL 2 ++#define DRM_MINOR_RENDER 3 + + /** + * DRM minor structure. This structure represents a drm minor number. +@@ -752,6 +810,9 @@ struct drm_minor { + struct device kdev; /**< Linux device */ + struct drm_device *dev; + struct proc_dir_entry *dev_root; /**< proc directory entry */ ++ struct drm_master *master; /* currently active master for this node */ ++ struct list_head master_list; ++ struct drm_mode_group mode_group; + }; + + /** +@@ -759,13 +820,10 @@ struct drm_minor { + * may contain multiple heads. + */ + struct drm_device { +- char *unique; /**< Unique identifier: e.g., busid */ +- int unique_len; /**< Length of unique field */ ++ struct list_head driver_item; /**< list of devices per driver */ + char *devname; /**< For /proc/interrupts */ + int if_version; /**< Highest interface version set */ + +- int blocked; /**< Blocked due to VC switch? 
*/ +- + /** \name Locks */ + /*@{ */ + spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ +@@ -788,12 +846,7 @@ struct drm_device { + atomic_t counts[15]; + /*@} */ + +- /** \name Authentication */ +- /*@{ */ + struct list_head filelist; +- struct drm_open_hash magiclist; /**< magic hash table */ +- struct list_head magicfree; +- /*@} */ + + /** \name Memory management */ + /*@{ */ +@@ -810,7 +863,7 @@ struct drm_device { + struct idr ctx_idr; + + struct list_head vmalist; /**< List of vmas (for debugging) */ +- struct drm_lock_data lock; /**< Information on hardware lock */ ++ + /*@} */ + + /** \name DMA queues (contexts) */ +@@ -881,12 +934,15 @@ struct drm_device { + struct drm_sg_mem *sg; /**< Scatter gather memory */ + int num_crtcs; /**< Number of CRTCs on this device */ + void *dev_private; /**< device private data */ ++ void *mm_private; ++ struct address_space *dev_mapping; + struct drm_sigdata sigdata; /**< For block_all_signals */ + sigset_t sigmask; + + struct drm_driver *driver; + drm_local_map_t *agp_buffer_map; + unsigned int agp_buffer_token; ++ struct drm_minor *control; /**< Control node for card */ + struct drm_minor *primary; /**< render type primary screen head */ + + /** \name Drawable information */ +@@ -895,6 +951,8 @@ struct drm_device { + struct idr drw_idr; + /*@} */ + ++ struct drm_mode_config mode_config; /**< Current mode config */ ++ + /** \name GEM information */ + /*@{ */ + spinlock_t object_name_lock; +@@ -997,6 +1055,8 @@ extern int drm_release(struct inode *inode, struct file *filp); + + /* Mapping support (drm_vm.h) */ + extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); ++extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); ++extern void drm_vm_open_locked(struct vm_area_struct *vma); + extern unsigned long drm_core_get_map_ofs(struct drm_map * map); + extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); + extern unsigned int drm_poll(struct 
file *filp, struct poll_table_struct *wait); +@@ -1153,6 +1213,8 @@ extern int drm_vblank_get(struct drm_device *dev, int crtc); + extern void drm_vblank_put(struct drm_device *dev, int crtc); + extern void drm_vblank_cleanup(struct drm_device *dev); + /* Modesetting support */ ++extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); ++extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); + extern int drm_modeset_ctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +@@ -1189,6 +1251,13 @@ extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); + extern void drm_agp_chipset_flush(struct drm_device *dev); + + /* Stub support (drm_stub.h) */ ++extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++struct drm_master *drm_master_create(struct drm_minor *minor); ++extern struct drm_master *drm_master_get(struct drm_master *master); ++extern void drm_master_put(struct drm_master **master); + extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, + struct drm_driver *driver); + extern int drm_put_dev(struct drm_device *dev); +@@ -1231,7 +1300,11 @@ struct drm_sysfs_class; + extern struct class *drm_sysfs_create(struct module *owner, char *name); + extern void drm_sysfs_destroy(void); + extern int drm_sysfs_device_add(struct drm_minor *minor); ++extern void drm_sysfs_hotplug_event(struct drm_device *dev); + extern void drm_sysfs_device_remove(struct drm_minor *minor); ++extern char *drm_get_connector_status_name(enum drm_connector_status status); ++extern int drm_sysfs_connector_add(struct drm_connector *connector); ++extern void drm_sysfs_connector_remove(struct drm_connector *connector); + + /* + * Basic memory manager support (drm_mm.c) +@@ -1251,10 +1324,12 @@ extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); + + /* Graphics 
Execution Manager library functions (drm_gem.c) */ + int drm_gem_init(struct drm_device *dev); ++void drm_gem_destroy(struct drm_device *dev); + void drm_gem_object_free(struct kref *kref); + struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, + size_t size); + void drm_gem_object_handle_free(struct kref *kref); ++int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); + + static inline void + drm_gem_object_reference(struct drm_gem_object *obj) +diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h +new file mode 100644 +index 0000000..0acb07f +--- /dev/null ++++ b/include/drm/drm_crtc.h +@@ -0,0 +1,733 @@ ++/* ++ * Copyright © 2006 Keith Packard ++ * Copyright © 2007-2008 Dave Airlie ++ * Copyright © 2007-2008 Intel Corporation ++ * Jesse Barnes ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++#ifndef __DRM_CRTC_H__ ++#define __DRM_CRTC_H__ ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++struct drm_device; ++struct drm_mode_set; ++struct drm_framebuffer; ++ ++ ++#define DRM_MODE_OBJECT_CRTC 0xcccccccc ++#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 ++#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 ++#define DRM_MODE_OBJECT_MODE 0xdededede ++#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 ++#define DRM_MODE_OBJECT_FB 0xfbfbfbfb ++#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb ++ ++struct drm_mode_object { ++ uint32_t id; ++ uint32_t type; ++}; ++ ++/* ++ * Note on terminology: here, for brevity and convenience, we refer to connector ++ * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, ++ * DVI, etc. And 'screen' refers to the whole of the visible display, which ++ * may span multiple monitors (and therefore multiple CRTC and connector ++ * structures). ++ */ ++ ++enum drm_mode_status { ++ MODE_OK = 0, /* Mode OK */ ++ MODE_HSYNC, /* hsync out of range */ ++ MODE_VSYNC, /* vsync out of range */ ++ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ ++ MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ ++ MODE_BAD_WIDTH, /* requires an unsupported linepitch */ ++ MODE_NOMODE, /* no mode with a maching name */ ++ MODE_NO_INTERLACE, /* interlaced mode not supported */ ++ MODE_NO_DBLESCAN, /* doublescan mode not supported */ ++ MODE_NO_VSCAN, /* multiscan mode not supported */ ++ MODE_MEM, /* insufficient video memory */ ++ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ ++ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ ++ MODE_MEM_VIRT, /* insufficient video memory given virtual size */ ++ MODE_NOCLOCK, /* no fixed clock available */ ++ MODE_CLOCK_HIGH, /* clock required is too high */ ++ MODE_CLOCK_LOW, /* clock required is too low */ ++ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ ++ MODE_BAD_HVALUE, /* horizontal timing was out of range */ ++ 
MODE_BAD_VVALUE, /* vertical timing was out of range */ ++ MODE_BAD_VSCAN, /* VScan value out of range */ ++ MODE_HSYNC_NARROW, /* horizontal sync too narrow */ ++ MODE_HSYNC_WIDE, /* horizontal sync too wide */ ++ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ ++ MODE_HBLANK_WIDE, /* horizontal blanking too wide */ ++ MODE_VSYNC_NARROW, /* vertical sync too narrow */ ++ MODE_VSYNC_WIDE, /* vertical sync too wide */ ++ MODE_VBLANK_NARROW, /* vertical blanking too narrow */ ++ MODE_VBLANK_WIDE, /* vertical blanking too wide */ ++ MODE_PANEL, /* exceeds panel dimensions */ ++ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ ++ MODE_ONE_WIDTH, /* only one width is supported */ ++ MODE_ONE_HEIGHT, /* only one height is supported */ ++ MODE_ONE_SIZE, /* only one resolution is supported */ ++ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ ++ MODE_UNVERIFIED = -3, /* mode needs to reverified */ ++ MODE_BAD = -2, /* unspecified reason */ ++ MODE_ERROR = -1 /* error condition */ ++}; ++ ++#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ ++ DRM_MODE_TYPE_CRTC_C) ++ ++#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ ++ .name = nm, .status = 0, .type = (t), .clock = (c), \ ++ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ ++ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ ++ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ ++ .vscan = (vs), .flags = (f), .vrefresh = 0 ++ ++#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ ++ ++struct drm_display_mode { ++ /* Header */ ++ struct list_head head; ++ struct drm_mode_object base; ++ ++ char name[DRM_DISPLAY_MODE_LEN]; ++ ++ int connector_count; ++ enum drm_mode_status status; ++ int type; ++ ++ /* Proposed mode values */ ++ int clock; ++ int hdisplay; ++ int hsync_start; ++ int hsync_end; ++ int htotal; ++ int hskew; ++ int vdisplay; ++ int vsync_start; ++ int vsync_end; ++ int vtotal; ++ int vscan; 
++ unsigned int flags; ++ ++ /* Addressable image size (may be 0 for projectors, etc.) */ ++ int width_mm; ++ int height_mm; ++ ++ /* Actual mode we give to hw */ ++ int clock_index; ++ int synth_clock; ++ int crtc_hdisplay; ++ int crtc_hblank_start; ++ int crtc_hblank_end; ++ int crtc_hsync_start; ++ int crtc_hsync_end; ++ int crtc_htotal; ++ int crtc_hskew; ++ int crtc_vdisplay; ++ int crtc_vblank_start; ++ int crtc_vblank_end; ++ int crtc_vsync_start; ++ int crtc_vsync_end; ++ int crtc_vtotal; ++ int crtc_hadjusted; ++ int crtc_vadjusted; ++ ++ /* Driver private mode info */ ++ int private_size; ++ int *private; ++ int private_flags; ++ ++ int vrefresh; ++ float hsync; ++}; ++ ++enum drm_connector_status { ++ connector_status_connected = 1, ++ connector_status_disconnected = 2, ++ connector_status_unknown = 3, ++}; ++ ++enum subpixel_order { ++ SubPixelUnknown = 0, ++ SubPixelHorizontalRGB, ++ SubPixelHorizontalBGR, ++ SubPixelVerticalRGB, ++ SubPixelVerticalBGR, ++ SubPixelNone, ++}; ++ ++ ++/* ++ * Describes a given display (e.g. CRT or flat panel) and its limitations. 
++ */ ++struct drm_display_info { ++ char name[DRM_DISPLAY_INFO_LEN]; ++ /* Input info */ ++ bool serration_vsync; ++ bool sync_on_green; ++ bool composite_sync; ++ bool separate_syncs; ++ bool blank_to_black; ++ unsigned char video_level; ++ bool digital; ++ /* Physical size */ ++ unsigned int width_mm; ++ unsigned int height_mm; ++ ++ /* Display parameters */ ++ unsigned char gamma; /* FIXME: storage format */ ++ bool gtf_supported; ++ bool standard_color; ++ enum { ++ monochrome = 0, ++ rgb, ++ other, ++ unknown, ++ } display_type; ++ bool active_off_supported; ++ bool suspend_supported; ++ bool standby_supported; ++ ++ /* Color info FIXME: storage format */ ++ unsigned short redx, redy; ++ unsigned short greenx, greeny; ++ unsigned short bluex, bluey; ++ unsigned short whitex, whitey; ++ ++ /* Clock limits FIXME: storage format */ ++ unsigned int min_vfreq, max_vfreq; ++ unsigned int min_hfreq, max_hfreq; ++ unsigned int pixel_clock; ++ ++ /* White point indices FIXME: storage format */ ++ unsigned int wpx1, wpy1; ++ unsigned int wpgamma1; ++ unsigned int wpx2, wpy2; ++ unsigned int wpgamma2; ++ ++ enum subpixel_order subpixel_order; ++ ++ char *raw_edid; /* if any */ ++}; ++ ++struct drm_framebuffer_funcs { ++ void (*destroy)(struct drm_framebuffer *framebuffer); ++ int (*create_handle)(struct drm_framebuffer *fb, ++ struct drm_file *file_priv, ++ unsigned int *handle); ++}; ++ ++struct drm_framebuffer { ++ struct drm_device *dev; ++ struct list_head head; ++ struct drm_mode_object base; ++ const struct drm_framebuffer_funcs *funcs; ++ unsigned int pitch; ++ unsigned int width; ++ unsigned int height; ++ /* depth can be 15 or 16 */ ++ unsigned int depth; ++ int bits_per_pixel; ++ int flags; ++ void *fbdev; ++ u32 pseudo_palette[17]; ++ struct list_head filp_head; ++}; ++ ++struct drm_property_blob { ++ struct drm_mode_object base; ++ struct list_head head; ++ unsigned int length; ++ void *data; ++}; ++ ++struct drm_property_enum { ++ uint64_t value; ++ struct 
list_head head; ++ char name[DRM_PROP_NAME_LEN]; ++}; ++ ++struct drm_property { ++ struct list_head head; ++ struct drm_mode_object base; ++ uint32_t flags; ++ char name[DRM_PROP_NAME_LEN]; ++ uint32_t num_values; ++ uint64_t *values; ++ ++ struct list_head enum_blob_list; ++}; ++ ++struct drm_crtc; ++struct drm_connector; ++struct drm_encoder; ++ ++/** ++ * drm_crtc_funcs - control CRTCs for a given device ++ * @dpms: control display power levels ++ * @save: save CRTC state ++ * @resore: restore CRTC state ++ * @lock: lock the CRTC ++ * @unlock: unlock the CRTC ++ * @shadow_allocate: allocate shadow pixmap ++ * @shadow_create: create shadow pixmap for rotation support ++ * @shadow_destroy: free shadow pixmap ++ * @mode_fixup: fixup proposed mode ++ * @mode_set: set the desired mode on the CRTC ++ * @gamma_set: specify color ramp for CRTC ++ * @destroy: deinit and free object. ++ * ++ * The drm_crtc_funcs structure is the central CRTC management structure ++ * in the DRM. Each CRTC controls one or more connectors (note that the name ++ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc. ++ * connectors, not just CRTs). ++ * ++ * Each driver is responsible for filling out this structure at startup time, ++ * in addition to providing other modesetting features, like i2c and DDC ++ * bus accessors. ++ */ ++struct drm_crtc_funcs { ++ /* Save CRTC state */ ++ void (*save)(struct drm_crtc *crtc); /* suspend? */ ++ /* Restore CRTC state */ ++ void (*restore)(struct drm_crtc *crtc); /* resume? 
*/ ++ ++ /* cursor controls */ ++ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, ++ uint32_t handle, uint32_t width, uint32_t height); ++ int (*cursor_move)(struct drm_crtc *crtc, int x, int y); ++ ++ /* Set gamma on the CRTC */ ++ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, ++ uint32_t size); ++ /* Object destroy routine */ ++ void (*destroy)(struct drm_crtc *crtc); ++ ++ int (*set_config)(struct drm_mode_set *set); ++}; ++ ++/** ++ * drm_crtc - central CRTC control structure ++ * @enabled: is this CRTC enabled? ++ * @x: x position on screen ++ * @y: y position on screen ++ * @desired_mode: new desired mode ++ * @desired_x: desired x for desired_mode ++ * @desired_y: desired y for desired_mode ++ * @funcs: CRTC control functions ++ * ++ * Each CRTC may have one or more connectors associated with it. This structure ++ * allows the CRTC to be controlled. ++ */ ++struct drm_crtc { ++ struct drm_device *dev; ++ struct list_head head; ++ ++ struct drm_mode_object base; ++ ++ /* framebuffer the connector is currently bound to */ ++ struct drm_framebuffer *fb; ++ ++ bool enabled; ++ ++ struct drm_display_mode mode; ++ ++ int x, y; ++ struct drm_display_mode *desired_mode; ++ int desired_x, desired_y; ++ const struct drm_crtc_funcs *funcs; ++ ++ /* CRTC gamma size for reporting to userspace */ ++ uint32_t gamma_size; ++ uint16_t *gamma_store; ++ ++ /* if you are using the helper */ ++ void *helper_private; ++}; ++ ++ ++/** ++ * drm_connector_funcs - control connectors on a given device ++ * @dpms: set power state (see drm_crtc_funcs above) ++ * @save: save connector state ++ * @restore: restore connector state ++ * @mode_valid: is this mode valid on the given connector? ++ * @mode_fixup: try to fixup proposed mode for this connector ++ * @mode_set: set this mode ++ * @detect: is this connector active? 
++ * @get_modes: get mode list for this connector ++ * @set_property: property for this connector may need update ++ * @destroy: make object go away ++ * ++ * Each CRTC may have one or more connectors attached to it. The functions ++ * below allow the core DRM code to control connectors, enumerate available modes, ++ * etc. ++ */ ++struct drm_connector_funcs { ++ void (*dpms)(struct drm_connector *connector, int mode); ++ void (*save)(struct drm_connector *connector); ++ void (*restore)(struct drm_connector *connector); ++ enum drm_connector_status (*detect)(struct drm_connector *connector); ++ void (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); ++ int (*set_property)(struct drm_connector *connector, struct drm_property *property, ++ uint64_t val); ++ void (*destroy)(struct drm_connector *connector); ++}; ++ ++struct drm_encoder_funcs { ++ void (*destroy)(struct drm_encoder *encoder); ++}; ++ ++#define DRM_CONNECTOR_MAX_UMODES 16 ++#define DRM_CONNECTOR_MAX_PROPERTY 16 ++#define DRM_CONNECTOR_LEN 32 ++#define DRM_CONNECTOR_MAX_ENCODER 2 ++ ++/** ++ * drm_encoder - central DRM encoder structure ++ */ ++struct drm_encoder { ++ struct drm_device *dev; ++ struct list_head head; ++ ++ struct drm_mode_object base; ++ int encoder_type; ++ uint32_t possible_crtcs; ++ uint32_t possible_clones; ++ ++ struct drm_crtc *crtc; ++ const struct drm_encoder_funcs *funcs; ++ void *helper_private; ++}; ++ ++/** ++ * drm_connector - central DRM connector control structure ++ * @crtc: CRTC this connector is currently connected to, NULL if none ++ * @interlace_allowed: can this connector handle interlaced modes? ++ * @doublescan_allowed: can this connector handle doublescan? ++ * @available_modes: modes available on this connector (from get_modes() + user) ++ * @initial_x: initial x position for this connector ++ * @initial_y: initial y position for this connector ++ * @status: connector connected? 
++ * @funcs: connector control functions ++ * ++ * Each connector may be connected to one or more CRTCs, or may be clonable by ++ * another connector if they can share a CRTC. Each connector also has a specific ++ * position in the broader display (referred to as a 'screen' though it could ++ * span multiple monitors). ++ */ ++struct drm_connector { ++ struct drm_device *dev; ++ struct device kdev; ++ struct device_attribute *attr; ++ struct list_head head; ++ ++ struct drm_mode_object base; ++ ++ int connector_type; ++ int connector_type_id; ++ bool interlace_allowed; ++ bool doublescan_allowed; ++ struct list_head modes; /* list of modes on this connector */ ++ ++ int initial_x, initial_y; ++ enum drm_connector_status status; ++ ++ /* these are modes added by probing with DDC or the BIOS */ ++ struct list_head probed_modes; ++ ++ struct drm_display_info display_info; ++ const struct drm_connector_funcs *funcs; ++ ++ struct list_head user_modes; ++ struct drm_property_blob *edid_blob_ptr; ++ u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; ++ uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; ++ ++ void *helper_private; ++ ++ uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; ++ uint32_t force_encoder_id; ++ struct drm_encoder *encoder; /* currently active encoder */ ++}; ++ ++/** ++ * struct drm_mode_set ++ * ++ * Represents a single crtc the connectors that it drives with what mode ++ * and from which framebuffer it scans out from. ++ * ++ * This is used to set modes. ++ */ ++struct drm_mode_set { ++ struct list_head head; ++ ++ struct drm_framebuffer *fb; ++ struct drm_crtc *crtc; ++ struct drm_display_mode *mode; ++ ++ uint32_t x; ++ uint32_t y; ++ ++ struct drm_connector **connectors; ++ size_t num_connectors; ++}; ++ ++/** ++ * struct drm_mode_config_funcs - configure CRTCs for a given screen layout ++ * @resize: adjust CRTCs as necessary for the proposed layout ++ * ++ * Currently only a resize hook is available. 
DRM will call back into the ++ * driver with a new screen width and height. If the driver can't support ++ * the proposed size, it can return false. Otherwise it should adjust ++ * the CRTC<->connector mappings as needed and update its view of the screen. ++ */ ++struct drm_mode_config_funcs { ++ struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd); ++ int (*fb_changed)(struct drm_device *dev); ++}; ++ ++struct drm_mode_group { ++ uint32_t num_crtcs; ++ uint32_t num_encoders; ++ uint32_t num_connectors; ++ ++ /* list of object IDs for this group */ ++ uint32_t *id_list; ++}; ++ ++/** ++ * drm_mode_config - Mode configuration control structure ++ * ++ */ ++struct drm_mode_config { ++ struct mutex mutex; /* protects configuration and IDR */ ++ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ ++ /* this is limited to one for now */ ++ int num_fb; ++ struct list_head fb_list; ++ int num_connector; ++ struct list_head connector_list; ++ int num_encoder; ++ struct list_head encoder_list; ++ ++ int num_crtc; ++ struct list_head crtc_list; ++ ++ struct list_head property_list; ++ ++ /* in-kernel framebuffers - hung of filp_head in drm_framebuffer */ ++ struct list_head fb_kernel_list; ++ ++ int min_width, min_height; ++ int max_width, max_height; ++ struct drm_mode_config_funcs *funcs; ++ unsigned long fb_base; ++ ++ /* pointers to standard properties */ ++ struct list_head property_blob_list; ++ struct drm_property *edid_property; ++ struct drm_property *dpms_property; ++ ++ /* DVI-I properties */ ++ struct drm_property *dvi_i_subconnector_property; ++ struct drm_property *dvi_i_select_subconnector_property; ++ ++ /* TV properties */ ++ struct drm_property *tv_subconnector_property; ++ struct drm_property *tv_select_subconnector_property; ++ struct drm_property *tv_mode_property; ++ struct drm_property *tv_left_margin_property; ++ struct 
drm_property *tv_right_margin_property; ++ struct drm_property *tv_top_margin_property; ++ struct drm_property *tv_bottom_margin_property; ++ ++ /* Optional properties */ ++ struct drm_property *scaling_mode_property; ++ struct drm_property *dithering_mode_property; ++}; ++ ++#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) ++#define obj_to_connector(x) container_of(x, struct drm_connector, base) ++#define obj_to_encoder(x) container_of(x, struct drm_encoder, base) ++#define obj_to_mode(x) container_of(x, struct drm_display_mode, base) ++#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) ++#define obj_to_property(x) container_of(x, struct drm_property, base) ++#define obj_to_blob(x) container_of(x, struct drm_property_blob, base) ++ ++ ++extern void drm_crtc_init(struct drm_device *dev, ++ struct drm_crtc *crtc, ++ const struct drm_crtc_funcs *funcs); ++extern void drm_crtc_cleanup(struct drm_crtc *crtc); ++ ++extern void drm_connector_init(struct drm_device *dev, ++ struct drm_connector *connector, ++ const struct drm_connector_funcs *funcs, ++ int connector_type); ++ ++extern void drm_connector_cleanup(struct drm_connector *connector); ++ ++extern void drm_encoder_init(struct drm_device *dev, ++ struct drm_encoder *encoder, ++ const struct drm_encoder_funcs *funcs, ++ int encoder_type); ++ ++extern void drm_encoder_cleanup(struct drm_encoder *encoder); ++ ++extern char *drm_get_connector_name(struct drm_connector *connector); ++extern char *drm_get_dpms_name(int val); ++extern char *drm_get_dvi_i_subconnector_name(int val); ++extern char *drm_get_dvi_i_select_name(int val); ++extern char *drm_get_tv_subconnector_name(int val); ++extern char *drm_get_tv_select_name(int val); ++extern void drm_fb_release(struct file *filp); ++extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); ++extern struct edid *drm_get_edid(struct drm_connector *connector, ++ struct i2c_adapter *adapter); ++extern 
unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter); ++extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); ++extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); ++extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); ++extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, ++ struct drm_display_mode *mode); ++extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode); ++extern void drm_mode_config_init(struct drm_device *dev); ++extern void drm_mode_config_cleanup(struct drm_device *dev); ++extern void drm_mode_set_name(struct drm_display_mode *mode); ++extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2); ++extern int drm_mode_width(struct drm_display_mode *mode); ++extern int drm_mode_height(struct drm_display_mode *mode); ++ ++/* for us by fb module */ ++extern int drm_mode_attachmode_crtc(struct drm_device *dev, ++ struct drm_crtc *crtc, ++ struct drm_display_mode *mode); ++extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode); ++ ++extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); ++extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); ++extern void drm_mode_list_concat(struct list_head *head, ++ struct list_head *new); ++extern void drm_mode_validate_size(struct drm_device *dev, ++ struct list_head *mode_list, ++ int maxX, int maxY, int maxPitch); ++extern void drm_mode_prune_invalid(struct drm_device *dev, ++ struct list_head *mode_list, bool verbose); ++extern void drm_mode_sort(struct list_head *mode_list); ++extern int drm_mode_vrefresh(struct drm_display_mode *mode); ++extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, ++ int adjust_flags); ++extern void drm_mode_connector_list_update(struct drm_connector *connector); ++extern int 
drm_mode_connector_update_edid_property(struct drm_connector *connector, ++ struct edid *edid); ++extern int drm_connector_property_set_value(struct drm_connector *connector, ++ struct drm_property *property, ++ uint64_t value); ++extern int drm_connector_property_get_value(struct drm_connector *connector, ++ struct drm_property *property, ++ uint64_t *value); ++extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev); ++extern void drm_framebuffer_set_object(struct drm_device *dev, ++ unsigned long handle); ++extern int drm_framebuffer_init(struct drm_device *dev, ++ struct drm_framebuffer *fb, ++ const struct drm_framebuffer_funcs *funcs); ++extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb); ++extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc); ++extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); ++extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY); ++extern bool drm_crtc_in_use(struct drm_crtc *crtc); ++ ++extern int drm_connector_attach_property(struct drm_connector *connector, ++ struct drm_property *property, uint64_t init_val); ++extern struct drm_property *drm_property_create(struct drm_device *dev, int flags, ++ const char *name, int num_values); ++extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); ++extern int drm_property_add_enum(struct drm_property *property, int index, ++ uint64_t value, const char *name); ++extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); ++extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, ++ char *formats[]); ++extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); ++extern int drm_mode_create_dithering_property(struct drm_device *dev); ++extern char *drm_get_encoder_name(struct drm_encoder *encoder); ++ ++extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, ++ struct drm_encoder 
*encoder); ++extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, ++ struct drm_encoder *encoder); ++extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, ++ int gamma_size); ++extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type); ++/* IOCTLs */ ++extern int drm_mode_getresources(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++ ++extern int drm_mode_getcrtc(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_getconnector(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_setcrtc(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_cursor_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_addfb(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_rmfb(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_getfb(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_addmode_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_rmmode_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_attachmode_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_detachmode_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++ ++extern int drm_mode_getproperty_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_getblob_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_hotplug_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int 
drm_mode_replacefb(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_getencoder(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv); ++#endif /* __DRM_CRTC_H__ */ +diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h +new file mode 100644 +index 0000000..a341828 +--- /dev/null ++++ b/include/drm/drm_crtc_helper.h +@@ -0,0 +1,121 @@ ++/* ++ * Copyright © 2006 Keith Packard ++ * Copyright © 2007-2008 Dave Airlie ++ * Copyright © 2007-2008 Intel Corporation ++ * Jesse Barnes ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/* ++ * The DRM mode setting helper functions are common code for drivers to use if ++ * they wish. 
Drivers are not forced to use this code in their ++ * implementations but it would be useful if they code they do use at least ++ * provides a consistent interface and operation to userspace ++ */ ++ ++#ifndef __DRM_CRTC_HELPER_H__ ++#define __DRM_CRTC_HELPER_H__ ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++struct drm_crtc_helper_funcs { ++ /* ++ * Control power levels on the CRTC. If the mode passed in is ++ * unsupported, the provider must use the next lowest power level. ++ */ ++ void (*dpms)(struct drm_crtc *crtc, int mode); ++ void (*prepare)(struct drm_crtc *crtc); ++ void (*commit)(struct drm_crtc *crtc); ++ ++ /* Provider can fixup or change mode timings before modeset occurs */ ++ bool (*mode_fixup)(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode); ++ /* Actually set the mode */ ++ void (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode, int x, int y); ++ ++ /* Move the crtc on the current fb to the given position *optional* */ ++ void (*mode_set_base)(struct drm_crtc *crtc, int x, int y); ++}; ++ ++struct drm_encoder_helper_funcs { ++ void (*dpms)(struct drm_encoder *encoder, int mode); ++ void (*save)(struct drm_encoder *encoder); ++ void (*restore)(struct drm_encoder *encoder); ++ ++ bool (*mode_fixup)(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode); ++ void (*prepare)(struct drm_encoder *encoder); ++ void (*commit)(struct drm_encoder *encoder); ++ void (*mode_set)(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode); ++ /* detect for DAC style encoders */ ++ enum drm_connector_status (*detect)(struct drm_encoder *encoder, ++ struct drm_connector *connector); ++}; ++ ++struct drm_connector_helper_funcs { ++ int (*get_modes)(struct drm_connector *connector); ++ int (*mode_valid)(struct drm_connector *connector, ++ 
struct drm_display_mode *mode); ++ struct drm_encoder *(*best_encoder)(struct drm_connector *connector); ++}; ++ ++extern void drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); ++extern void drm_helper_disable_unused_functions(struct drm_device *dev); ++extern int drm_helper_hotplug_stage_two(struct drm_device *dev); ++extern bool drm_helper_initial_config(struct drm_device *dev, bool can_grow); ++extern int drm_crtc_helper_set_config(struct drm_mode_set *set); ++extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ int x, int y); ++extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); ++ ++extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, ++ struct drm_mode_fb_cmd *mode_cmd); ++ ++static inline void drm_crtc_helper_add(struct drm_crtc *crtc, ++ const struct drm_crtc_helper_funcs *funcs) ++{ ++ crtc->helper_private = (void *)funcs; ++} ++ ++static inline void drm_encoder_helper_add(struct drm_encoder *encoder, ++ const struct drm_encoder_helper_funcs *funcs) ++{ ++ encoder->helper_private = (void *)funcs; ++} ++ ++static inline void drm_connector_helper_add(struct drm_connector *connector, ++ const struct drm_connector_helper_funcs *funcs) ++{ ++ connector->helper_private = (void *)funcs; ++} ++ ++extern int drm_helper_resume_force_mode(struct drm_device *dev); ++#endif +diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h +new file mode 100644 +index 0000000..c707c15 +--- /dev/null ++++ b/include/drm/drm_edid.h +@@ -0,0 +1,202 @@ ++/* ++ * Copyright © 2007-2008 Intel Corporation ++ * Jesse Barnes ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of 
the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++#ifndef __DRM_EDID_H__ ++#define __DRM_EDID_H__ ++ ++#include ++ ++#define EDID_LENGTH 128 ++#define DDC_ADDR 0x50 ++ ++#ifdef BIG_ENDIAN ++#error "EDID structure is little endian, need big endian versions" ++#else ++ ++struct est_timings { ++ u8 t1; ++ u8 t2; ++ u8 mfg_rsvd; ++} __attribute__((packed)); ++ ++struct std_timing { ++ u8 hsize; /* need to multiply by 8 then add 248 */ ++ u8 vfreq:6; /* need to add 60 */ ++ u8 aspect_ratio:2; /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */ ++} __attribute__((packed)); ++ ++/* If detailed data is pixel timing */ ++struct detailed_pixel_timing { ++ u8 hactive_lo; ++ u8 hblank_lo; ++ u8 hblank_hi:4; ++ u8 hactive_hi:4; ++ u8 vactive_lo; ++ u8 vblank_lo; ++ u8 vblank_hi:4; ++ u8 vactive_hi:4; ++ u8 hsync_offset_lo; ++ u8 hsync_pulse_width_lo; ++ u8 vsync_pulse_width_lo:4; ++ u8 vsync_offset_lo:4; ++ u8 hsync_pulse_width_hi:2; ++ u8 hsync_offset_hi:2; ++ u8 vsync_pulse_width_hi:2; ++ u8 vsync_offset_hi:2; ++ u8 width_mm_lo; ++ u8 height_mm_lo; ++ u8 height_mm_hi:4; ++ u8 width_mm_hi:4; ++ u8 hborder; ++ u8 vborder; ++ u8 unknown0:1; ++ u8 vsync_positive:1; ++ u8 hsync_positive:1; ++ u8 separate_sync:2; ++ u8 stereo:1; ++ u8 unknown6:1; ++ u8 interlaced:1; ++} 
__attribute__((packed)); ++ ++/* If it's not pixel timing, it'll be one of the below */ ++struct detailed_data_string { ++ u8 str[13]; ++} __attribute__((packed)); ++ ++struct detailed_data_monitor_range { ++ u8 min_vfreq; ++ u8 max_vfreq; ++ u8 min_hfreq_khz; ++ u8 max_hfreq_khz; ++ u8 pixel_clock_mhz; /* need to multiply by 10 */ ++ u16 sec_gtf_toggle; /* A000=use above, 20=use below */ /* FIXME: byte order */ ++ u8 hfreq_start_khz; /* need to multiply by 2 */ ++ u8 c; /* need to divide by 2 */ ++ u16 m; /* FIXME: byte order */ ++ u8 k; ++ u8 j; /* need to divide by 2 */ ++} __attribute__((packed)); ++ ++struct detailed_data_wpindex { ++ u8 white_y_lo:2; ++ u8 white_x_lo:2; ++ u8 pad:4; ++ u8 white_x_hi; ++ u8 white_y_hi; ++ u8 gamma; /* need to divide by 100 then add 1 */ ++} __attribute__((packed)); ++ ++struct detailed_data_color_point { ++ u8 windex1; ++ u8 wpindex1[3]; ++ u8 windex2; ++ u8 wpindex2[3]; ++} __attribute__((packed)); ++ ++struct detailed_non_pixel { ++ u8 pad1; ++ u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name ++ fb=color point data, fa=standard timing data, ++ f9=undefined, f8=mfg. 
reserved */ ++ u8 pad2; ++ union { ++ struct detailed_data_string str; ++ struct detailed_data_monitor_range range; ++ struct detailed_data_wpindex color; ++ struct std_timing timings[5]; ++ } data; ++} __attribute__((packed)); ++ ++#define EDID_DETAIL_STD_MODES 0xfa ++#define EDID_DETAIL_MONITOR_CPDATA 0xfb ++#define EDID_DETAIL_MONITOR_NAME 0xfc ++#define EDID_DETAIL_MONITOR_RANGE 0xfd ++#define EDID_DETAIL_MONITOR_STRING 0xfe ++#define EDID_DETAIL_MONITOR_SERIAL 0xff ++ ++struct detailed_timing { ++ u16 pixel_clock; /* need to multiply by 10 KHz */ /* FIXME: byte order */ ++ union { ++ struct detailed_pixel_timing pixel_data; ++ struct detailed_non_pixel other_data; ++ } data; ++} __attribute__((packed)); ++ ++struct edid { ++ u8 header[8]; ++ /* Vendor & product info */ ++ u8 mfg_id[2]; ++ u8 prod_code[2]; ++ u32 serial; /* FIXME: byte order */ ++ u8 mfg_week; ++ u8 mfg_year; ++ /* EDID version */ ++ u8 version; ++ u8 revision; ++ /* Display info: */ ++ /* input definition */ ++ u8 serration_vsync:1; ++ u8 sync_on_green:1; ++ u8 composite_sync:1; ++ u8 separate_syncs:1; ++ u8 blank_to_black:1; ++ u8 video_level:2; ++ u8 digital:1; /* bits below must be zero if set */ ++ u8 width_cm; ++ u8 height_cm; ++ u8 gamma; ++ /* feature support */ ++ u8 default_gtf:1; ++ u8 preferred_timing:1; ++ u8 standard_color:1; ++ u8 display_type:2; /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */ ++ u8 pm_active_off:1; ++ u8 pm_suspend:1; ++ u8 pm_standby:1; ++ /* Color characteristics */ ++ u8 red_green_lo; ++ u8 black_white_lo; ++ u8 red_x; ++ u8 red_y; ++ u8 green_x; ++ u8 green_y; ++ u8 blue_x; ++ u8 blue_y; ++ u8 white_x; ++ u8 white_y; ++ /* Est. timings and mfg rsvd timings*/ ++ struct est_timings established_timings; ++ /* Standard timings 1-8*/ ++ struct std_timing standard_timings[8]; ++ /* Detailing timings 1-4 */ ++ struct detailed_timing detailed_timings[4]; ++ /* Number of 128 byte ext. 
blocks */ ++ u8 extensions; ++ /* Checksum */ ++ u8 checksum; ++} __attribute__((packed)); ++ ++#endif /* little endian structs */ ++ ++#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) ++ ++#endif /* __DRM_EDID_H__ */ +diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h +new file mode 100644 +index 0000000..601d2bd +--- /dev/null ++++ b/include/drm/drm_mode.h +@@ -0,0 +1,271 @@ ++/* ++ * Copyright (c) 2007 Dave Airlie ++ * Copyright (c) 2007 Jakob Bornecrantz ++ * Copyright (c) 2008 Red Hat Inc. ++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * Copyright (c) 2007-2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ */ ++ ++#ifndef _DRM_MODE_H ++#define _DRM_MODE_H ++ ++#if !defined(__KERNEL__) && !defined(_KERNEL) ++#include ++#else ++#include ++#endif ++ ++#define DRM_DISPLAY_INFO_LEN 32 ++#define DRM_CONNECTOR_NAME_LEN 32 ++#define DRM_DISPLAY_MODE_LEN 32 ++#define DRM_PROP_NAME_LEN 32 ++ ++#define DRM_MODE_TYPE_BUILTIN (1<<0) ++#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) ++#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) ++#define DRM_MODE_TYPE_PREFERRED (1<<3) ++#define DRM_MODE_TYPE_DEFAULT (1<<4) ++#define DRM_MODE_TYPE_USERDEF (1<<5) ++#define DRM_MODE_TYPE_DRIVER (1<<6) ++ ++/* Video mode flags */ ++/* bit compatible with the xorg definitions. */ ++#define DRM_MODE_FLAG_PHSYNC (1<<0) ++#define DRM_MODE_FLAG_NHSYNC (1<<1) ++#define DRM_MODE_FLAG_PVSYNC (1<<2) ++#define DRM_MODE_FLAG_NVSYNC (1<<3) ++#define DRM_MODE_FLAG_INTERLACE (1<<4) ++#define DRM_MODE_FLAG_DBLSCAN (1<<5) ++#define DRM_MODE_FLAG_CSYNC (1<<6) ++#define DRM_MODE_FLAG_PCSYNC (1<<7) ++#define DRM_MODE_FLAG_NCSYNC (1<<8) ++#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ ++#define DRM_MODE_FLAG_BCAST (1<<10) ++#define DRM_MODE_FLAG_PIXMUX (1<<11) ++#define DRM_MODE_FLAG_DBLCLK (1<<12) ++#define DRM_MODE_FLAG_CLKDIV2 (1<<13) ++ ++/* DPMS flags */ ++/* bit compatible with the xorg definitions. 
*/ ++#define DRM_MODE_DPMS_ON 0 ++#define DRM_MODE_DPMS_STANDBY 1 ++#define DRM_MODE_DPMS_SUSPEND 2 ++#define DRM_MODE_DPMS_OFF 3 ++ ++/* Scaling mode options */ ++#define DRM_MODE_SCALE_NON_GPU 0 ++#define DRM_MODE_SCALE_FULLSCREEN 1 ++#define DRM_MODE_SCALE_NO_SCALE 2 ++#define DRM_MODE_SCALE_ASPECT 3 ++ ++/* Dithering mode options */ ++#define DRM_MODE_DITHERING_OFF 0 ++#define DRM_MODE_DITHERING_ON 1 ++ ++struct drm_mode_modeinfo { ++ uint32_t clock; ++ uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew; ++ uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan; ++ ++ uint32_t vrefresh; /* vertical refresh * 1000 */ ++ ++ uint32_t flags; ++ uint32_t type; ++ char name[DRM_DISPLAY_MODE_LEN]; ++}; ++ ++struct drm_mode_card_res { ++ uint64_t fb_id_ptr; ++ uint64_t crtc_id_ptr; ++ uint64_t connector_id_ptr; ++ uint64_t encoder_id_ptr; ++ uint32_t count_fbs; ++ uint32_t count_crtcs; ++ uint32_t count_connectors; ++ uint32_t count_encoders; ++ uint32_t min_width, max_width; ++ uint32_t min_height, max_height; ++}; ++ ++struct drm_mode_crtc { ++ uint64_t set_connectors_ptr; ++ uint32_t count_connectors; ++ ++ uint32_t crtc_id; /**< Id */ ++ uint32_t fb_id; /**< Id of framebuffer */ ++ ++ uint32_t x, y; /**< Position on the frameuffer */ ++ ++ uint32_t gamma_size; ++ uint32_t mode_valid; ++ struct drm_mode_modeinfo mode; ++}; ++ ++#define DRM_MODE_ENCODER_NONE 0 ++#define DRM_MODE_ENCODER_DAC 1 ++#define DRM_MODE_ENCODER_TMDS 2 ++#define DRM_MODE_ENCODER_LVDS 3 ++#define DRM_MODE_ENCODER_TVDAC 4 ++ ++struct drm_mode_get_encoder { ++ uint32_t encoder_id; ++ uint32_t encoder_type; ++ ++ uint32_t crtc_id; /**< Id of crtc */ ++ ++ uint32_t possible_crtcs; ++ uint32_t possible_clones; ++}; ++ ++/* This is for connectors with multiple signal types. */ ++/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. 
*/ ++#define DRM_MODE_SUBCONNECTOR_Automatic 0 ++#define DRM_MODE_SUBCONNECTOR_Unknown 0 ++#define DRM_MODE_SUBCONNECTOR_DVID 3 ++#define DRM_MODE_SUBCONNECTOR_DVIA 4 ++#define DRM_MODE_SUBCONNECTOR_Composite 5 ++#define DRM_MODE_SUBCONNECTOR_SVIDEO 6 ++#define DRM_MODE_SUBCONNECTOR_Component 8 ++ ++#define DRM_MODE_CONNECTOR_Unknown 0 ++#define DRM_MODE_CONNECTOR_VGA 1 ++#define DRM_MODE_CONNECTOR_DVII 2 ++#define DRM_MODE_CONNECTOR_DVID 3 ++#define DRM_MODE_CONNECTOR_DVIA 4 ++#define DRM_MODE_CONNECTOR_Composite 5 ++#define DRM_MODE_CONNECTOR_SVIDEO 6 ++#define DRM_MODE_CONNECTOR_LVDS 7 ++#define DRM_MODE_CONNECTOR_Component 8 ++#define DRM_MODE_CONNECTOR_9PinDIN 9 ++#define DRM_MODE_CONNECTOR_DisplayPort 10 ++#define DRM_MODE_CONNECTOR_HDMIA 11 ++#define DRM_MODE_CONNECTOR_HDMIB 12 ++ ++struct drm_mode_get_connector { ++ ++ uint64_t encoders_ptr; ++ uint64_t modes_ptr; ++ uint64_t props_ptr; ++ uint64_t prop_values_ptr; ++ ++ uint32_t count_modes; ++ uint32_t count_props; ++ uint32_t count_encoders; ++ ++ uint32_t encoder_id; /**< Current Encoder */ ++ uint32_t connector_id; /**< Id */ ++ uint32_t connector_type; ++ uint32_t connector_type_id; ++ ++ uint32_t connection; ++ uint32_t mm_width, mm_height; /**< HxW in millimeters */ ++ uint32_t subpixel; ++}; ++ ++#define DRM_MODE_PROP_PENDING (1<<0) ++#define DRM_MODE_PROP_RANGE (1<<1) ++#define DRM_MODE_PROP_IMMUTABLE (1<<2) ++#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ ++#define DRM_MODE_PROP_BLOB (1<<4) ++ ++struct drm_mode_property_enum { ++ uint64_t value; ++ char name[DRM_PROP_NAME_LEN]; ++}; ++ ++struct drm_mode_get_property { ++ uint64_t values_ptr; /* values and blob lengths */ ++ uint64_t enum_blob_ptr; /* enum and blob id ptrs */ ++ ++ uint32_t prop_id; ++ uint32_t flags; ++ char name[DRM_PROP_NAME_LEN]; ++ ++ uint32_t count_values; ++ uint32_t count_enum_blobs; ++}; ++ ++struct drm_mode_connector_set_property { ++ uint64_t value; ++ uint32_t prop_id; ++ uint32_t 
connector_id; ++}; ++ ++struct drm_mode_get_blob { ++ uint32_t blob_id; ++ uint32_t length; ++ uint64_t data; ++}; ++ ++struct drm_mode_fb_cmd { ++ uint32_t fb_id; ++ uint32_t width, height; ++ uint32_t pitch; ++ uint32_t bpp; ++ uint32_t depth; ++ /* driver specific handle */ ++ uint32_t handle; ++}; ++ ++struct drm_mode_mode_cmd { ++ uint32_t connector_id; ++ struct drm_mode_modeinfo mode; ++}; ++ ++#define DRM_MODE_CURSOR_BO (1<<0) ++#define DRM_MODE_CURSOR_MOVE (1<<1) ++ ++/* ++ * depending on the value in flags diffrent members are used. ++ * ++ * CURSOR_BO uses ++ * crtc ++ * width ++ * height ++ * handle - if 0 turns the cursor of ++ * ++ * CURSOR_MOVE uses ++ * crtc ++ * x ++ * y ++ */ ++struct drm_mode_cursor { ++ uint32_t flags; ++ uint32_t crtc_id; ++ int32_t x; ++ int32_t y; ++ uint32_t width; ++ uint32_t height; ++ /* driver specific handle */ ++ uint32_t handle; ++}; ++ ++struct drm_mode_crtc_lut { ++ uint32_t crtc_id; ++ uint32_t gamma_size; ++ ++ /* pointers to arrays */ ++ uint64_t red; ++ uint64_t green; ++ uint64_t blue; ++}; ++ ++#endif +diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h +index 4800373..ee5389d 100644 +--- a/include/drm/drm_sarea.h ++++ b/include/drm/drm_sarea.h +@@ -36,12 +36,12 @@ + + /* SAREA area needs to be at least a page */ + #if defined(__alpha__) +-#define SAREA_MAX 0x2000 ++#define SAREA_MAX 0x2000U + #elif defined(__ia64__) +-#define SAREA_MAX 0x10000 /* 64kB */ ++#define SAREA_MAX 0x10000U /* 64kB */ + #else + /* Intel 830M driver needs at least 8k SAREA */ +-#define SAREA_MAX 0x2000 ++#define SAREA_MAX 0x2000U + #endif + + /** Maximum number of drawables in the SAREA */ +diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h +index 152b34d..b3bcf72 100644 +--- a/include/drm/i915_drm.h ++++ b/include/drm/i915_drm.h +@@ -113,8 +113,31 @@ typedef struct _drm_i915_sarea { + int pipeB_y; + int pipeB_w; + int pipeB_h; ++ ++ /* fill out some space for old userspace triple buffer */ ++ drm_handle_t 
unused_handle; ++ uint32_t unused1, unused2, unused3; ++ ++ /* buffer object handles for static buffers. May change ++ * over the lifetime of the client. ++ */ ++ uint32_t front_bo_handle; ++ uint32_t back_bo_handle; ++ uint32_t unused_bo_handle; ++ uint32_t depth_bo_handle; ++ + } drm_i915_sarea_t; + ++/* due to userspace building against these headers we need some compat here */ ++#define planeA_x pipeA_x ++#define planeA_y pipeA_y ++#define planeA_w pipeA_w ++#define planeA_h pipeA_h ++#define planeB_x pipeB_x ++#define planeB_y pipeB_y ++#define planeB_w pipeB_w ++#define planeB_h pipeB_h ++ + /* Flags for perf_boxes + */ + #define I915_BOX_RING_EMPTY 0x1 +@@ -160,6 +183,7 @@ typedef struct _drm_i915_sarea { + #define DRM_I915_GEM_SET_TILING 0x21 + #define DRM_I915_GEM_GET_TILING 0x22 + #define DRM_I915_GEM_GET_APERTURE 0x23 ++#define DRM_I915_GEM_MMAP_GTT 0x24 + + #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) + #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) +@@ -177,6 +201,8 @@ typedef struct _drm_i915_sarea { + #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) + #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) + #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) ++#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) ++#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) + #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) + #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) + #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 
+@@ -187,6 +213,7 @@ typedef struct _drm_i915_sarea { + #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) + #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) + #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) ++#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) + #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) + #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) + #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) +@@ -196,7 +223,7 @@ typedef struct _drm_i915_sarea { + /* Allow drivers to submit batchbuffers directly to hardware, relying + * on the security mechanisms provided by hardware. + */ +-typedef struct _drm_i915_batchbuffer { ++typedef struct drm_i915_batchbuffer { + int start; /* agp offset */ + int used; /* nr bytes in use */ + int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ +@@ -382,6 +409,18 @@ struct drm_i915_gem_mmap { + uint64_t addr_ptr; + }; + ++struct drm_i915_gem_mmap_gtt { ++ /** Handle for the object being mapped. */ ++ uint32_t handle; ++ uint32_t pad; ++ /** ++ * Fake offset to use for subsequent mmap call ++ * ++ * This is a fixed-size type for 32/64 compatibility. 
++ */ ++ uint64_t offset; ++}; ++ + struct drm_i915_gem_set_domain { + /** Handle for the object */ + uint32_t handle; +diff --git a/include/linux/console.h b/include/linux/console.h +index 248e6e3..a67a90c 100644 +--- a/include/linux/console.h ++++ b/include/linux/console.h +@@ -153,4 +153,8 @@ void vcs_remove_sysfs(struct tty_struct *tty); + #define VESA_HSYNC_SUSPEND 2 + #define VESA_POWERDOWN 3 + ++#ifdef CONFIG_VGA_CONSOLE ++extern bool vgacon_text_force(void); ++#endif ++ + #endif /* _LINUX_CONSOLE_H */ diff --git a/sys-kernel/geos_one-sources/files/drm-nouveau.patch b/sys-kernel/geos_one-sources/files/drm-nouveau.patch new file mode 100644 index 00000000..971f5d5c --- /dev/null +++ b/sys-kernel/geos_one-sources/files/drm-nouveau.patch @@ -0,0 +1,15641 @@ +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig +index 649757f..014fa6f 100644 +--- a/drivers/gpu/drm/Kconfig ++++ b/drivers/gpu/drm/Kconfig +@@ -113,3 +113,9 @@ config DRM_SAVAGE + help + Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister + chipset. If M is selected the module will be called savage. 
++ ++config DRM_NOUVEAU ++ tristate "Nouveau (nvidia) cards" ++ depends on DRM ++ help ++ Choose for nvidia support +diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile +index 48567a9..e68042b 100644 +--- a/drivers/gpu/drm/Makefile ++++ b/drivers/gpu/drm/Makefile +@@ -26,4 +26,5 @@ obj-$(CONFIG_DRM_I915) += i915/ + obj-$(CONFIG_DRM_SIS) += sis/ + obj-$(CONFIG_DRM_SAVAGE)+= savage/ + obj-$(CONFIG_DRM_VIA) +=via/ ++obj-$(CONFIG_DRM_NOUVEAU) += nouveau/ + +diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c +index 112ba7a..6205d56 100644 +--- a/drivers/gpu/drm/drm_bufs.c ++++ b/drivers/gpu/drm/drm_bufs.c +@@ -49,8 +49,8 @@ unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource + + EXPORT_SYMBOL(drm_get_resource_len); + +-static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, +- drm_local_map_t *map) ++struct drm_map_list *drm_find_matching_map(struct drm_device *dev, ++ drm_local_map_t *map) + { + struct drm_map_list *entry; + list_for_each_entry(entry, &dev->maplist, head) { +@@ -63,6 +63,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, + + return NULL; + } ++EXPORT_SYMBOL(drm_find_matching_map); + + static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, + unsigned long user_token, int hashed_handle) +diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile +new file mode 100644 +index 0000000..f01f82a +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/Makefile +@@ -0,0 +1,19 @@ ++# ++# Makefile for the drm device driver. This driver provides support for the ++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
++ ++ccflags-y := -Iinclude/drm ++nouveau-y := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ ++ nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ ++ nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \ ++ nv04_timer.o \ ++ nv04_mc.o nv40_mc.o nv50_mc.o \ ++ nv04_fb.o nv10_fb.o nv40_fb.o \ ++ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ ++ nv04_graph.o nv10_graph.o nv20_graph.o \ ++ nv40_graph.o nv50_graph.o \ ++ nv04_instmem.o nv50_instmem.o ++ ++nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o ++ ++obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o +diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c +new file mode 100644 +index 0000000..ab3b23a +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c +@@ -0,0 +1,296 @@ ++/* ++ * Copyright 2007 Dave Airlied ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++/* ++ * Authors: Dave Airlied ++ * Ben Skeggs ++ * Jeremy Kolb ++ */ ++ ++#include "drmP.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++ ++static struct drm_ttm_backend * ++nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ switch (dev_priv->gart_info.type) { ++ case NOUVEAU_GART_AGP: ++ return drm_agp_init_ttm(dev); ++ case NOUVEAU_GART_SGDMA: ++ return nouveau_sgdma_init_ttm(dev); ++ default: ++ DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); ++ break; ++ } ++ ++ return NULL; ++} ++ ++static int ++nouveau_bo_fence_type(struct drm_buffer_object *bo, ++ uint32_t *fclass, uint32_t *type) ++{ ++ /* When we get called, *fclass is set to the requested fence class */ ++ ++ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) ++ *type = 3; ++ else ++ *type = 1; ++ return 0; ++ ++} ++ ++static int ++nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) ++{ ++ /* We'll do this from user space. 
*/ ++ return 0; ++} ++ ++static int ++nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ switch (type) { ++ case DRM_BO_MEM_LOCAL: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CACHED; ++ man->drm_bus_maptype = 0; ++ break; ++ case DRM_BO_MEM_VRAM: ++ man->flags = _DRM_FLAG_MEMTYPE_FIXED | ++ _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_NEEDS_IOREMAP; ++ man->io_addr = NULL; ++ man->drm_bus_maptype = _DRM_FRAME_BUFFER; ++ man->io_offset = drm_get_resource_start(dev, 1); ++ man->io_size = drm_get_resource_len(dev, 1); ++ if (man->io_size > nouveau_mem_fb_amount(dev)) ++ man->io_size = nouveau_mem_fb_amount(dev); ++ break; ++ case DRM_BO_MEM_PRIV0: ++ /* Unmappable VRAM */ ++ man->flags = _DRM_FLAG_MEMTYPE_CMA; ++ man->drm_bus_maptype = 0; ++ break; ++ case DRM_BO_MEM_TT: ++ switch (dev_priv->gart_info.type) { ++ case NOUVEAU_GART_AGP: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CSELECT | ++ _DRM_FLAG_NEEDS_IOREMAP; ++ man->drm_bus_maptype = _DRM_AGP; ++ break; ++ case NOUVEAU_GART_SGDMA: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CSELECT | ++ _DRM_FLAG_MEMTYPE_CMA; ++ man->drm_bus_maptype = _DRM_SCATTER_GATHER; ++ break; ++ default: ++ DRM_ERROR("Unknown GART type: %d\n", ++ dev_priv->gart_info.type); ++ return -EINVAL; ++ } ++ ++ man->io_offset = dev_priv->gart_info.aper_base; ++ man->io_size = dev_priv->gart_info.aper_size; ++ man->io_addr = NULL; ++ break; ++ default: ++ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static uint64_t ++nouveau_bo_evict_flags(struct drm_buffer_object *bo) ++{ ++ switch (bo->mem.mem_type) { ++ case DRM_BO_MEM_LOCAL: ++ case DRM_BO_MEM_TT: ++ return DRM_BO_FLAG_MEM_LOCAL; ++ default: ++ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; ++ } ++ return 0; ++} ++ ++ ++/* GPU-assisted copy using 
NV_MEMORY_TO_MEMORY_FORMAT, can access ++ * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. ++ */ ++static int ++nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ uint32_t srch, dsth, page_count; ++ ++ /* Can happen during init/takedown */ ++ if (!dchan->chan) ++ return -EINVAL; ++ ++ srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; ++ dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; ++ if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { ++ dchan->m2mf_dma_source = srch; ++ dchan->m2mf_dma_destin = dsth; ++ ++ BEGIN_RING(NvSubM2MF, ++ NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); ++ OUT_RING (dchan->m2mf_dma_source); ++ OUT_RING (dchan->m2mf_dma_destin); ++ } ++ ++ page_count = new_mem->num_pages; ++ while (page_count) { ++ int line_count = (page_count > 2047) ? 2047 : page_count; ++ ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); ++ OUT_RING (old_mem->mm_node->start << PAGE_SHIFT); ++ OUT_RING (new_mem->mm_node->start << PAGE_SHIFT); ++ OUT_RING (PAGE_SIZE); /* src_pitch */ ++ OUT_RING (PAGE_SIZE); /* dst_pitch */ ++ OUT_RING (PAGE_SIZE); /* line_length */ ++ OUT_RING (line_count); ++ OUT_RING ((1<<8)|(1<<0)); ++ OUT_RING (0); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); ++ OUT_RING (0); ++ ++ page_count -= line_count; ++ } ++ ++ return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, ++ DRM_FENCE_TYPE_EXE, 0, new_mem); ++} ++ ++/* Flip pages into the GART and move if we can. 
*/ ++static int ++nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg tmp_mem; ++ int ret; ++ ++ tmp_mem = *new_mem; ++ tmp_mem.mm_node = NULL; ++ tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT | ++ DRM_BO_FLAG_CACHED | ++ DRM_BO_FLAG_FORCE_CACHING); ++ ++ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); ++ if (ret) ++ return ret; ++ ++ ret = drm_ttm_bind(bo->ttm, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); ++ ++out_cleanup: ++ if (tmp_mem.mm_node) { ++ mutex_lock(&dev->struct_mutex); ++ if (tmp_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(tmp_mem.mm_node); ++ tmp_mem.mm_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ return ret; ++} ++ ++static int ++nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ ++ if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem)) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ else ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ else { ++ if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ ++ return 0; ++} ++ ++static void ++nouveau_bo_flush_ttm(struct drm_ttm *ttm) ++{ ++} ++ ++static uint32_t nouveau_mem_prios[] = { ++ DRM_BO_MEM_PRIV0, ++ DRM_BO_MEM_VRAM, ++ DRM_BO_MEM_TT, ++ DRM_BO_MEM_LOCAL ++}; ++static uint32_t nouveau_busy_prios[] = { ++ DRM_BO_MEM_TT, ++ 
DRM_BO_MEM_PRIV0, ++ DRM_BO_MEM_VRAM, ++ DRM_BO_MEM_LOCAL ++}; ++ ++struct drm_bo_driver nouveau_bo_driver = { ++ .mem_type_prio = nouveau_mem_prios, ++ .mem_busy_prio = nouveau_busy_prios, ++ .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), ++ .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), ++ .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, ++ .fence_type = nouveau_bo_fence_type, ++ .invalidate_caches = nouveau_bo_invalidate_caches, ++ .init_mem_type = nouveau_bo_init_mem_type, ++ .evict_flags = nouveau_bo_evict_flags, ++ .move = nouveau_bo_move, ++ .ttm_cache_flush= nouveau_bo_flush_ttm, ++ .command_stream_barrier = NULL ++}; +diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c +new file mode 100644 +index 0000000..e519dc4 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_dma.c +@@ -0,0 +1,172 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++ ++int ++nouveau_dma_channel_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ struct mem_block *pushbuf; ++ int grclass, ret, i; ++ ++ DRM_DEBUG("\n"); ++ ++ pushbuf = nouveau_mem_alloc(dev, 0, 0x8000, ++ NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED, ++ (struct drm_file *)-2); ++ if (!pushbuf) { ++ DRM_ERROR("Failed to allocate DMA push buffer\n"); ++ return -ENOMEM; ++ } ++ ++ /* Allocate channel */ ++ ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2, ++ pushbuf, NvDmaFB, NvDmaTT); ++ if (ret) { ++ DRM_ERROR("Error allocating GPU channel: %d\n", ret); ++ return ret; ++ } ++ DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id); ++ ++ /* Map push buffer */ ++ drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev); ++ if (!dchan->chan->pushbuf_mem->map->handle) { ++ DRM_ERROR("Failed to ioremap push buffer\n"); ++ return -EINVAL; ++ } ++ dchan->pushbuf = (void*)dchan->chan->pushbuf_mem->map->handle; ++ ++ /* Initialise DMA vars */ ++ dchan->max = (dchan->chan->pushbuf_mem->size >> 2) - 2; ++ dchan->put = dchan->chan->pushbuf_base >> 2; ++ dchan->cur = dchan->put; ++ dchan->free = dchan->max - dchan->cur; ++ ++ /* Insert NOPS for NOUVEAU_DMA_SKIPS */ ++ dchan->free -= NOUVEAU_DMA_SKIPS; ++ dchan->push_free = NOUVEAU_DMA_SKIPS; ++ for (i=0; i < NOUVEAU_DMA_SKIPS; i++) ++ OUT_RING(0); ++ ++ /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */ ++ if ((ret = nouveau_notifier_alloc(dchan->chan, NvNotify0, 1, ++ 
&dchan->notify0_offset))) { ++ DRM_ERROR("Error allocating NvNotify0: %d\n", ret); ++ return ret; ++ } ++ ++ /* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ ++ if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT; ++ else grclass = NV50_MEMORY_TO_MEMORY_FORMAT; ++ if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) { ++ DRM_ERROR("Error creating NvM2MF: %d\n", ret); ++ return ret; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF, ++ gpuobj, NULL))) { ++ DRM_ERROR("Error referencing NvM2MF: %d\n", ret); ++ return ret; ++ } ++ dchan->m2mf_dma_source = NvDmaFB; ++ dchan->m2mf_dma_destin = NvDmaFB; ++ ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1); ++ OUT_RING (NvM2MF); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1); ++ OUT_RING (NvNotify0); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); ++ OUT_RING (dchan->m2mf_dma_source); ++ OUT_RING (dchan->m2mf_dma_destin); ++ FIRE_RING(); ++ ++ return 0; ++} ++ ++void ++nouveau_dma_channel_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ ++ DRM_DEBUG("\n"); ++ ++ if (dchan->chan) { ++ nouveau_fifo_free(dchan->chan); ++ dchan->chan = NULL; ++ } ++} ++ ++#define READ_GET() ((NV_READ(dchan->chan->get) - \ ++ dchan->chan->pushbuf_base) >> 2) ++#define WRITE_PUT(val) do { \ ++ NV_WRITE(dchan->chan->put, \ ++ ((val) << 2) + dchan->chan->pushbuf_base); \ ++} while(0) ++ ++int ++nouveau_dma_wait(struct drm_device *dev, int size) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ uint32_t get; ++ ++ while (dchan->free < size) { ++ get = READ_GET(); ++ ++ if (dchan->put >= get) { ++ dchan->free = dchan->max - dchan->cur; ++ ++ if (dchan->free < size) { ++ dchan->push_free = 1; ++ OUT_RING(0x20000000|dchan->chan->pushbuf_base); ++ if 
(get <= NOUVEAU_DMA_SKIPS) { ++ /*corner case - will be idle*/ ++ if (dchan->put <= NOUVEAU_DMA_SKIPS) ++ WRITE_PUT(NOUVEAU_DMA_SKIPS + 1); ++ ++ do { ++ get = READ_GET(); ++ } while (get <= NOUVEAU_DMA_SKIPS); ++ } ++ ++ WRITE_PUT(NOUVEAU_DMA_SKIPS); ++ dchan->cur = dchan->put = NOUVEAU_DMA_SKIPS; ++ dchan->free = get - (NOUVEAU_DMA_SKIPS + 1); ++ } ++ } else { ++ dchan->free = get - dchan->cur - 1; ++ } ++ } ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h +new file mode 100644 +index 0000000..ce3c58c +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_dma.h +@@ -0,0 +1,96 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#ifndef __NOUVEAU_DMA_H__ ++#define __NOUVEAU_DMA_H__ ++ ++typedef enum { ++ NvSubM2MF = 0, ++} nouveau_subchannel_id_t; ++ ++typedef enum { ++ NvM2MF = 0x80039001, ++ NvDmaFB = 0x8003d001, ++ NvDmaTT = 0x8003d002, ++ NvNotify0 = 0x8003d003 ++} nouveau_object_handle_t; ++ ++#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000 ++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001 ++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY 0x00000180 ++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE 0x00000184 ++#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c ++ ++#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039 ++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200 ++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c ++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238 ++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c ++ ++#define BEGIN_RING(subc, mthd, cnt) do { \ ++ int push_size = (cnt) + 1; \ ++ if (dchan->push_free) { \ ++ DRM_ERROR("prior packet incomplete: %d\n", dchan->push_free); \ ++ break; \ ++ } \ ++ if (dchan->free < push_size) { \ ++ if (nouveau_dma_wait(dev, push_size)) { \ ++ DRM_ERROR("FIFO timeout\n"); \ ++ break; \ ++ } \ ++ } \ ++ dchan->free -= push_size; \ ++ dchan->push_free = push_size; \ ++ OUT_RING(((cnt)<<18) | ((subc)<<15) | mthd); \ ++} while(0) ++ ++#define OUT_RING(data) do { \ ++ if (dchan->push_free == 0) { \ ++ DRM_ERROR("no space left in packet\n"); \ ++ break; \ ++ } \ ++ dchan->pushbuf[dchan->cur++] = (data); \ ++ dchan->push_free--; \ ++} while(0) ++ ++#define FIRE_RING() do { \ ++ if (dchan->push_free) { \ ++ DRM_ERROR("packet incomplete: %d\n", dchan->push_free); \ ++ break; \ ++ } \ 
++ if (dchan->cur != dchan->put) { \ ++ DRM_MEMORYBARRIER(); \ ++ dchan->put = dchan->cur; \ ++ NV_WRITE(dchan->chan->put, dchan->put << 2); \ ++ } \ ++} while(0) ++ ++#endif +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c +new file mode 100644 +index 0000000..4a4277f +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c +@@ -0,0 +1,112 @@ ++/* ++ * Copyright 2005 Stephane Marchesin. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ { ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), ++ .class = PCI_BASE_CLASS_DISPLAY << 16, ++ .class_mask = 0xff << 16, ++ }, ++ { ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID), ++ .class = PCI_BASE_CLASS_DISPLAY << 16, ++ .class_mask = 0xff << 16, ++ } ++}; ++ ++extern struct drm_ioctl_desc nouveau_ioctls[]; ++extern int nouveau_max_ioctl; ++ ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | ++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, ++ .load = nouveau_load, ++ .firstopen = nouveau_firstopen, ++ .lastclose = nouveau_lastclose, ++ .unload = nouveau_unload, ++ .preclose = nouveau_preclose, ++ .irq_preinstall = nouveau_irq_preinstall, ++ .irq_postinstall = nouveau_irq_postinstall, ++ .irq_uninstall = nouveau_irq_uninstall, ++ .irq_handler = nouveau_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = nouveau_ioctls, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) ++ .compat_ioctl = nouveau_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ }, ++ ++ .bo_driver = &nouveau_bo_driver, ++ .fence_driver = &nouveau_fence_driver, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++#ifdef GIT_REVISION ++ .date = GIT_REVISION, ++#else ++ .date = DRIVER_DATE, ++#endif ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int __init nouveau_init(void) ++{ ++ driver.num_ioctls = nouveau_max_ioctl; ++ return drm_init(&driver); ++} ++ ++static void __exit nouveau_exit(void) ++{ ++ drm_exit(&driver); ++} ++ 
++module_init(nouveau_init); ++module_exit(nouveau_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h +new file mode 100644 +index 0000000..a97e3e9 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h +@@ -0,0 +1,621 @@ ++/* ++ * Copyright 2005 Stephane Marchesin. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#ifndef __NOUVEAU_DRV_H__ ++#define __NOUVEAU_DRV_H__ ++ ++#define DRIVER_AUTHOR "Stephane Marchesin" ++#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net" ++ ++#define DRIVER_NAME "nouveau" ++#define DRIVER_DESC "nVidia Riva/TNT/GeForce" ++#define DRIVER_DATE "20060213" ++ ++#define DRIVER_MAJOR 0 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 11 ++ ++#define NOUVEAU_FAMILY 0x0000FFFF ++#define NOUVEAU_FLAGS 0xFFFF0000 ++ ++#include "nouveau_drm.h" ++#include "nouveau_reg.h" ++ ++struct mem_block { ++ struct mem_block *next; ++ struct mem_block *prev; ++ uint64_t start; ++ uint64_t size; ++ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ ++ int flags; ++ drm_local_map_t *map; ++ drm_handle_t map_handle; ++}; ++ ++enum nouveau_flags { ++ NV_NFORCE =0x10000000, ++ NV_NFORCE2 =0x20000000 ++}; ++ ++#define NVOBJ_ENGINE_SW 0 ++#define NVOBJ_ENGINE_GR 1 ++#define NVOBJ_ENGINE_INT 0xdeadbeef ++ ++#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0) ++#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) ++#define NVOBJ_FLAG_ZERO_FREE (1 << 2) ++#define NVOBJ_FLAG_FAKE (1 << 3) ++struct nouveau_gpuobj { ++ struct list_head list; ++ ++ int im_channel; ++ struct mem_block *im_pramin; ++ struct mem_block *im_backing; ++ int im_bound; ++ ++ uint32_t flags; ++ int refcount; ++ ++ uint32_t engine; ++ uint32_t class; ++ ++ void (*dtor)(struct drm_device *, struct nouveau_gpuobj *); ++ void *priv; ++}; ++ ++struct nouveau_gpuobj_ref { ++ struct list_head list; ++ ++ struct nouveau_gpuobj *gpuobj; ++ uint32_t instance; ++ ++ int channel; ++ int handle; ++}; ++ ++struct nouveau_channel ++{ ++ struct drm_device *dev; ++ int id; ++ ++ /* owner of this fifo */ ++ struct drm_file *file_priv; ++ /* mapping of the fifo itself */ ++ drm_local_map_t *map; ++ /* mapping of the regs controling the fifo */ ++ drm_local_map_t *regs; ++ ++ /* Fencing */ ++ uint32_t next_sequence; ++ ++ /* DMA push buffer */ ++ struct nouveau_gpuobj_ref *pushbuf; ++ struct mem_block 
*pushbuf_mem; ++ uint32_t pushbuf_base; ++ ++ /* FIFO user control regs */ ++ uint32_t user, user_size; ++ uint32_t put; ++ uint32_t get; ++ uint32_t ref_cnt; ++ ++ /* Notifier memory */ ++ struct mem_block *notifier_block; ++ struct mem_block *notifier_heap; ++ drm_local_map_t *notifier_map; ++ ++ /* PFIFO context */ ++ struct nouveau_gpuobj_ref *ramfc; ++ ++ /* PGRAPH context */ ++ /* XXX may be merge 2 pointers as private data ??? */ ++ struct nouveau_gpuobj_ref *ramin_grctx; ++ void *pgraph_ctx; ++ ++ /* NV50 VM */ ++ struct nouveau_gpuobj *vm_pd; ++ struct nouveau_gpuobj_ref *vm_gart_pt; ++ struct nouveau_gpuobj_ref *vm_vram_pt; ++ ++ /* Objects */ ++ struct nouveau_gpuobj_ref *ramin; /* Private instmem */ ++ struct mem_block *ramin_heap; /* Private PRAMIN heap */ ++ struct nouveau_gpuobj_ref *ramht; /* Hash table */ ++ struct list_head ramht_refs; /* Objects referenced by RAMHT */ ++}; ++ ++struct nouveau_drm_channel { ++ struct nouveau_channel *chan; ++ ++ /* DMA state */ ++ int max, put, cur, free; ++ int push_free; ++ volatile uint32_t *pushbuf; ++ ++ /* Notifiers */ ++ uint32_t notify0_offset; ++ ++ /* Buffer moves */ ++ uint32_t m2mf_dma_source; ++ uint32_t m2mf_dma_destin; ++}; ++ ++struct nouveau_config { ++ struct { ++ int location; ++ int size; ++ } cmdbuf; ++}; ++ ++struct nouveau_instmem_engine { ++ void *priv; ++ ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++ ++ int (*populate)(struct drm_device *, struct nouveau_gpuobj *, ++ uint32_t *size); ++ void (*clear)(struct drm_device *, struct nouveau_gpuobj *); ++ int (*bind)(struct drm_device *, struct nouveau_gpuobj *); ++ int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); ++}; ++ ++struct nouveau_mc_engine { ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++}; ++ ++struct nouveau_timer_engine { ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++ uint64_t (*read)(struct 
drm_device *dev); ++}; ++ ++struct nouveau_fb_engine { ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++}; ++ ++struct nouveau_fifo_engine { ++ void *priv; ++ ++ int channels; ++ ++ int (*init)(struct drm_device *); ++ void (*takedown)(struct drm_device *); ++ ++ int (*channel_id)(struct drm_device *); ++ ++ int (*create_context)(struct nouveau_channel *); ++ void (*destroy_context)(struct nouveau_channel *); ++ int (*load_context)(struct nouveau_channel *); ++ int (*save_context)(struct nouveau_channel *); ++}; ++ ++struct nouveau_pgraph_engine { ++ int (*init)(struct drm_device *); ++ void (*takedown)(struct drm_device *); ++ ++ int (*create_context)(struct nouveau_channel *); ++ void (*destroy_context)(struct nouveau_channel *); ++ int (*load_context)(struct nouveau_channel *); ++ int (*save_context)(struct nouveau_channel *); ++}; ++ ++struct nouveau_engine { ++ struct nouveau_instmem_engine instmem; ++ struct nouveau_mc_engine mc; ++ struct nouveau_timer_engine timer; ++ struct nouveau_fb_engine fb; ++ struct nouveau_pgraph_engine graph; ++ struct nouveau_fifo_engine fifo; ++}; ++ ++#define NOUVEAU_MAX_CHANNEL_NR 128 ++struct drm_nouveau_private { ++ enum { ++ NOUVEAU_CARD_INIT_DOWN, ++ NOUVEAU_CARD_INIT_DONE, ++ NOUVEAU_CARD_INIT_FAILED ++ } init_state; ++ ++ int ttm; ++ ++ /* the card type, takes NV_* as values */ ++ int card_type; ++ /* exact chipset, derived from NV_PMC_BOOT_0 */ ++ int chipset; ++ int flags; ++ ++ drm_local_map_t *mmio; ++ drm_local_map_t *fb; ++ drm_local_map_t *ramin; /* NV40 onwards */ ++ ++ int fifo_alloc_count; ++ struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; ++ ++ struct nouveau_engine Engine; ++ struct nouveau_drm_channel channel; ++ ++ /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ ++ struct nouveau_gpuobj *ramht; ++ uint32_t ramin_rsvd_vram; ++ uint32_t ramht_offset; ++ uint32_t ramht_size; ++ uint32_t ramht_bits; ++ uint32_t ramfc_offset; ++ uint32_t ramfc_size; ++ 
uint32_t ramro_offset; ++ uint32_t ramro_size; ++ ++ /* base physical adresses */ ++ uint64_t fb_phys; ++ uint64_t fb_available_size; ++ ++ struct { ++ enum { ++ NOUVEAU_GART_NONE = 0, ++ NOUVEAU_GART_AGP, ++ NOUVEAU_GART_SGDMA ++ } type; ++ uint64_t aper_base; ++ uint64_t aper_size; ++ ++ struct nouveau_gpuobj *sg_ctxdma; ++ struct page *sg_dummy_page; ++ dma_addr_t sg_dummy_bus; ++ ++ /* nottm hack */ ++ struct drm_ttm_backend *sg_be; ++ unsigned long sg_handle; ++ } gart_info; ++ ++ /* G8x global VRAM page table */ ++ struct nouveau_gpuobj *vm_vram_pt; ++ ++ /* the mtrr covering the FB */ ++ int fb_mtrr; ++ ++ struct mem_block *agp_heap; ++ struct mem_block *fb_heap; ++ struct mem_block *fb_nomap_heap; ++ struct mem_block *ramin_heap; ++ struct mem_block *pci_heap; ++ ++ /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ ++ uint32_t ctx_table_size; ++ struct nouveau_gpuobj_ref *ctx_table; ++ ++ struct nouveau_config config; ++ ++ struct list_head gpuobj_list; ++ ++ struct nouveau_suspend_resume { ++ uint32_t fifo_mode; ++ uint32_t graph_ctx_control; ++ uint32_t graph_state; ++ uint32_t *ramin_copy; ++ uint64_t ramin_size; ++ } susres; ++}; ++ ++#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \ ++ struct drm_nouveau_private *nv = dev->dev_private; \ ++ if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \ ++ DRM_ERROR("called without init\n"); \ ++ return -EINVAL; \ ++ } \ ++} while(0) ++ ++#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do { \ ++ struct drm_nouveau_private *nv = dev->dev_private; \ ++ if (!nouveau_fifo_owner(dev, (cl), (id))) { \ ++ DRM_ERROR("pid %d doesn't own channel %d\n", \ ++ DRM_CURRENTPID, (id)); \ ++ return -EPERM; \ ++ } \ ++ (ch) = nv->fifos[(id)]; \ ++} while(0) ++ ++/* nouveau_state.c */ ++extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); ++extern int nouveau_load(struct drm_device *, unsigned long flags); ++extern int nouveau_firstopen(struct drm_device *); ++extern void 
nouveau_lastclose(struct drm_device *); ++extern int nouveau_unload(struct drm_device *); ++extern int nouveau_ioctl_getparam(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_setparam(struct drm_device *, void *data, ++ struct drm_file *); ++extern void nouveau_wait_for_idle(struct drm_device *); ++extern int nouveau_card_init(struct drm_device *); ++extern int nouveau_ioctl_card_init(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_suspend(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_resume(struct drm_device *, void *data, ++ struct drm_file *); ++ ++/* nouveau_mem.c */ ++extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, ++ uint64_t size); ++extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, ++ uint64_t size, int align2, ++ struct drm_file *, int tail); ++extern void nouveau_mem_takedown(struct mem_block **heap); ++extern void nouveau_mem_free_block(struct mem_block *); ++extern uint64_t nouveau_mem_fb_amount(struct drm_device *); ++extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); ++extern int nouveau_ioctl_mem_alloc(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_mem_free(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_mem_tile(struct drm_device *, void *data, ++ struct drm_file *); ++extern struct mem_block* nouveau_mem_alloc(struct drm_device *, ++ int alignment, uint64_t size, ++ int flags, struct drm_file *); ++extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*); ++extern int nouveau_mem_init(struct drm_device *); ++extern int nouveau_mem_init_ttm(struct drm_device *); ++extern void nouveau_mem_close(struct drm_device *); ++ ++/* nouveau_notifier.c */ ++extern int nouveau_notifier_init_channel(struct nouveau_channel *); ++extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); ++extern 
int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, ++ int cout, uint32_t *offset); ++extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data, ++ struct drm_file *); ++ ++/* nouveau_fifo.c */ ++extern int nouveau_fifo_init(struct drm_device *); ++extern int nouveau_fifo_ctx_size(struct drm_device *); ++extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *); ++extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *, ++ int channel); ++extern int nouveau_fifo_alloc(struct drm_device *dev, ++ struct nouveau_channel **chan, ++ struct drm_file *file_priv, ++ struct mem_block *pushbuf, ++ uint32_t fb_ctxdma, uint32_t tt_ctxdma); ++extern void nouveau_fifo_free(struct nouveau_channel *); ++extern int nouveau_channel_idle(struct nouveau_channel *chan); ++ ++/* nouveau_object.c */ ++extern int nouveau_gpuobj_early_init(struct drm_device *); ++extern int nouveau_gpuobj_init(struct drm_device *); ++extern void nouveau_gpuobj_takedown(struct drm_device *); ++extern void nouveau_gpuobj_late_takedown(struct drm_device *); ++extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, ++ uint32_t vram_h, uint32_t tt_h); ++extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); ++extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, ++ int size, int align, uint32_t flags, ++ struct nouveau_gpuobj **); ++extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); ++extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, ++ uint32_t handle, struct nouveau_gpuobj *, ++ struct nouveau_gpuobj_ref **); ++extern int nouveau_gpuobj_ref_del(struct drm_device *, ++ struct nouveau_gpuobj_ref **); ++extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle, ++ struct nouveau_gpuobj_ref **ref_ret); ++extern int 
nouveau_gpuobj_new_ref(struct drm_device *, ++ struct nouveau_channel *alloc_chan, ++ struct nouveau_channel *ref_chan, ++ uint32_t handle, int size, int align, ++ uint32_t flags, struct nouveau_gpuobj_ref **); ++extern int nouveau_gpuobj_new_fake(struct drm_device *, ++ uint32_t p_offset, uint32_t b_offset, ++ uint32_t size, uint32_t flags, ++ struct nouveau_gpuobj **, ++ struct nouveau_gpuobj_ref**); ++extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, ++ uint64_t offset, uint64_t size, int access, ++ int target, struct nouveau_gpuobj **); ++extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, ++ uint64_t offset, uint64_t size, ++ int access, struct nouveau_gpuobj **, ++ uint32_t *o_ret); ++extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, ++ struct nouveau_gpuobj **); ++extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, ++ struct drm_file *); ++ ++/* nouveau_irq.c */ ++extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); ++extern void nouveau_irq_preinstall(struct drm_device *); ++extern int nouveau_irq_postinstall(struct drm_device *); ++extern void nouveau_irq_uninstall(struct drm_device *); ++ ++/* nouveau_sgdma.c */ ++extern int nouveau_sgdma_init(struct drm_device *); ++extern void nouveau_sgdma_takedown(struct drm_device *); ++extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, ++ uint32_t *page); ++extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); ++extern int nouveau_sgdma_nottm_hack_init(struct drm_device *); ++extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *); ++ ++/* nouveau_dma.c */ ++extern int nouveau_dma_channel_init(struct drm_device *); ++extern void nouveau_dma_channel_takedown(struct drm_device *); ++extern int nouveau_dma_wait(struct drm_device *, int size); ++ ++/* nv04_fb.c */ ++extern int nv04_fb_init(struct 
drm_device *); ++extern void nv04_fb_takedown(struct drm_device *); ++ ++/* nv10_fb.c */ ++extern int nv10_fb_init(struct drm_device *); ++extern void nv10_fb_takedown(struct drm_device *); ++ ++/* nv40_fb.c */ ++extern int nv40_fb_init(struct drm_device *); ++extern void nv40_fb_takedown(struct drm_device *); ++ ++/* nv04_fifo.c */ ++extern int nv04_fifo_channel_id(struct drm_device *); ++extern int nv04_fifo_create_context(struct nouveau_channel *); ++extern void nv04_fifo_destroy_context(struct nouveau_channel *); ++extern int nv04_fifo_load_context(struct nouveau_channel *); ++extern int nv04_fifo_save_context(struct nouveau_channel *); ++ ++/* nv10_fifo.c */ ++extern int nv10_fifo_channel_id(struct drm_device *); ++extern int nv10_fifo_create_context(struct nouveau_channel *); ++extern void nv10_fifo_destroy_context(struct nouveau_channel *); ++extern int nv10_fifo_load_context(struct nouveau_channel *); ++extern int nv10_fifo_save_context(struct nouveau_channel *); ++ ++/* nv40_fifo.c */ ++extern int nv40_fifo_init(struct drm_device *); ++extern int nv40_fifo_create_context(struct nouveau_channel *); ++extern void nv40_fifo_destroy_context(struct nouveau_channel *); ++extern int nv40_fifo_load_context(struct nouveau_channel *); ++extern int nv40_fifo_save_context(struct nouveau_channel *); ++ ++/* nv50_fifo.c */ ++extern int nv50_fifo_init(struct drm_device *); ++extern void nv50_fifo_takedown(struct drm_device *); ++extern int nv50_fifo_channel_id(struct drm_device *); ++extern int nv50_fifo_create_context(struct nouveau_channel *); ++extern void nv50_fifo_destroy_context(struct nouveau_channel *); ++extern int nv50_fifo_load_context(struct nouveau_channel *); ++extern int nv50_fifo_save_context(struct nouveau_channel *); ++ ++/* nv04_graph.c */ ++extern void nouveau_nv04_context_switch(struct drm_device *); ++extern int nv04_graph_init(struct drm_device *); ++extern void nv04_graph_takedown(struct drm_device *); ++extern int nv04_graph_create_context(struct 
nouveau_channel *); ++extern void nv04_graph_destroy_context(struct nouveau_channel *); ++extern int nv04_graph_load_context(struct nouveau_channel *); ++extern int nv04_graph_save_context(struct nouveau_channel *); ++ ++/* nv10_graph.c */ ++extern void nouveau_nv10_context_switch(struct drm_device *); ++extern int nv10_graph_init(struct drm_device *); ++extern void nv10_graph_takedown(struct drm_device *); ++extern int nv10_graph_create_context(struct nouveau_channel *); ++extern void nv10_graph_destroy_context(struct nouveau_channel *); ++extern int nv10_graph_load_context(struct nouveau_channel *); ++extern int nv10_graph_save_context(struct nouveau_channel *); ++ ++/* nv20_graph.c */ ++extern int nv20_graph_create_context(struct nouveau_channel *); ++extern void nv20_graph_destroy_context(struct nouveau_channel *); ++extern int nv20_graph_load_context(struct nouveau_channel *); ++extern int nv20_graph_save_context(struct nouveau_channel *); ++extern int nv20_graph_init(struct drm_device *); ++extern void nv20_graph_takedown(struct drm_device *); ++extern int nv30_graph_init(struct drm_device *); ++ ++/* nv40_graph.c */ ++extern int nv40_graph_init(struct drm_device *); ++extern void nv40_graph_takedown(struct drm_device *); ++extern int nv40_graph_create_context(struct nouveau_channel *); ++extern void nv40_graph_destroy_context(struct nouveau_channel *); ++extern int nv40_graph_load_context(struct nouveau_channel *); ++extern int nv40_graph_save_context(struct nouveau_channel *); ++ ++/* nv50_graph.c */ ++extern int nv50_graph_init(struct drm_device *); ++extern void nv50_graph_takedown(struct drm_device *); ++extern int nv50_graph_create_context(struct nouveau_channel *); ++extern void nv50_graph_destroy_context(struct nouveau_channel *); ++extern int nv50_graph_load_context(struct nouveau_channel *); ++extern int nv50_graph_save_context(struct nouveau_channel *); ++ ++/* nv04_instmem.c */ ++extern int nv04_instmem_init(struct drm_device *); ++extern void 
nv04_instmem_takedown(struct drm_device *); ++extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, ++ uint32_t *size); ++extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); ++ ++/* nv50_instmem.c */ ++extern int nv50_instmem_init(struct drm_device *); ++extern void nv50_instmem_takedown(struct drm_device *); ++extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, ++ uint32_t *size); ++extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); ++ ++/* nv04_mc.c */ ++extern int nv04_mc_init(struct drm_device *); ++extern void nv04_mc_takedown(struct drm_device *); ++ ++/* nv40_mc.c */ ++extern int nv40_mc_init(struct drm_device *); ++extern void nv40_mc_takedown(struct drm_device *); ++ ++/* nv50_mc.c */ ++extern int nv50_mc_init(struct drm_device *); ++extern void nv50_mc_takedown(struct drm_device *); ++ ++/* nv04_timer.c */ ++extern int nv04_timer_init(struct drm_device *); ++extern uint64_t nv04_timer_read(struct drm_device *); ++extern void nv04_timer_takedown(struct drm_device *); ++ ++extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg); ++ ++/* nouveau_buffer.c */ ++extern struct drm_bo_driver nouveau_bo_driver; ++ ++/* nouveau_fence.c */ ++extern struct drm_fence_driver nouveau_fence_driver; ++extern void nouveau_fence_handler(struct drm_device *dev, int channel); ++ ++#if defined(__powerpc__) ++#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) ) ++#define NV_WRITE(reg,val) out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) ) ++#else ++#define NV_READ(reg) DRM_READ32( 
dev_priv->mmio, (reg) ) ++#define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) ++#endif ++ ++/* PRAMIN access */ ++#if defined(__powerpc__) ++#define NV_RI32(o) in_be32((void __iomem *)(dev_priv->ramin)->handle+(o)) ++#define NV_WI32(o,v) out_be32((void __iomem*)(dev_priv->ramin)->handle+(o), (v)) ++#else ++#define NV_RI32(o) DRM_READ32(dev_priv->ramin, (o)) ++#define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v)) ++#endif ++ ++#define INSTANCE_RD(o,i) NV_RI32((o)->im_pramin->start + ((i)<<2)) ++#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v)) ++ ++#endif /* __NOUVEAU_DRV_H__ */ +diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c +new file mode 100644 +index 0000000..4ad51ae +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c +@@ -0,0 +1,119 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++ ++static int ++nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags); ++ ++ /* DRM's channel always uses IRQs to signal fences */ ++ if (class == dev_priv->channel.chan->id) ++ return 1; ++ ++ /* Other channels don't use IRQs at all yet */ ++ return 0; ++} ++ ++static int ++nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags, ++ uint32_t *breadcrumb, uint32_t *native_type) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[class]; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ ++ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags); ++ ++ /* We can't emit fences on client channels, update sequence number ++ * and userspace will emit the fence ++ */ ++ *breadcrumb = ++chan->next_sequence; ++ *native_type = DRM_FENCE_TYPE_EXE; ++ if (chan != dchan->chan) { ++ DRM_DEBUG("user fence 0x%08x\n", *breadcrumb); ++ return 0; ++ } ++ ++ DRM_DEBUG("emit 0x%08x\n", *breadcrumb); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1); ++ OUT_RING (*breadcrumb); ++ BEGIN_RING(NvSubM2MF, 0x0150, 1); ++ OUT_RING (0); ++ FIRE_RING (); ++ ++ return 0; ++} ++ ++static void ++nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_types) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_fence_class_manager *fc = &dev->fm.fence_class[class]; ++ struct nouveau_channel *chan = 
dev_priv->fifos[class]; ++ ++ DRM_DEBUG("class=%d\n", class); ++ DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types); ++ ++ if (waiting_types & DRM_FENCE_TYPE_EXE) { ++ uint32_t sequence = NV_READ(chan->ref_cnt); ++ ++ DRM_DEBUG("got 0x%08x\n", sequence); ++ drm_fence_handler(dev, class, sequence, waiting_types, 0); ++ } ++} ++ ++void ++nouveau_fence_handler(struct drm_device *dev, int channel) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[channel]; ++ ++ DRM_DEBUG("class=%d\n", channel); ++ ++ write_lock(&fm->lock); ++ nouveau_fence_poll(dev, channel, fc->waiting_types); ++ write_unlock(&fm->lock); ++} ++ ++struct drm_fence_driver nouveau_fence_driver = { ++ .num_classes = 8, ++ .wrap_diff = (1 << 30), ++ .flush_diff = (1 << 29), ++ .sequence_mask = 0xffffffffU, ++ .has_irq = nouveau_fence_has_irq, ++ .emit = nouveau_fence_emit, ++ .flush = NULL, ++ .poll = nouveau_fence_poll, ++ .needed_flush = NULL, ++ .wait = NULL ++}; +diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.c b/drivers/gpu/drm/nouveau/nouveau_fifo.c +new file mode 100644 +index 0000000..92ea8fc +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_fifo.c +@@ -0,0 +1,601 @@ ++/* ++ * Copyright 2005-2006 Stephane Marchesin ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++ ++/* returns the size of fifo context */ ++int nouveau_fifo_ctx_size(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ ++ if (dev_priv->card_type >= NV_40) ++ return 128; ++ else if (dev_priv->card_type >= NV_17) ++ return 64; ++ else ++ return 32; ++} ++ ++/*********************************** ++ * functions doing the actual work ++ ***********************************/ ++ ++static int nouveau_fifo_instmem_configure(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV03_PFIFO_RAMHT, ++ (0x03 << 24) /* search 128 */ | ++ ((dev_priv->ramht_bits - 9) << 16) | ++ (dev_priv->ramht_offset >> 8) ++ ); ++ ++ NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); ++ ++ switch(dev_priv->card_type) ++ { ++ case NV_40: ++ switch (dev_priv->chipset) { ++ case 0x47: ++ case 0x49: ++ case 0x4b: ++ NV_WRITE(0x2230, 1); ++ break; ++ default: ++ break; ++ } ++ NV_WRITE(NV40_PFIFO_RAMFC, 0x30002); ++ break; ++ case NV_44: ++ NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) | ++ (2 << 16)); ++ break; ++ case NV_30: ++ case NV_20: ++ case NV_17: ++ NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) | ++ (1 << 16) /* 64 Bytes entry*/); ++ /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ ++ break; ++ case NV_11: ++ case NV_10: ++ case 
NV_04: ++ NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8); ++ break; ++ } ++ ++ return 0; ++} ++ ++int nouveau_fifo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PFIFO); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PFIFO); ++ ++ /* Enable PFIFO error reporting */ ++ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); ++ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); ++ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ ++ ret = nouveau_fifo_instmem_configure(dev); ++ if (ret) { ++ DRM_ERROR("Failed to configure instance memory\n"); ++ return ret; ++ } ++ ++ /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */ ++ ++ DRM_DEBUG("Setting defaults for remaining PFIFO regs\n"); ++ ++ /* All channels into PIO mode */ ++ NV_WRITE(NV04_PFIFO_MODE, 0x00000000); ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ /* Channel 0 active, PIO mode */ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000); ++ /* PUT and GET to 0 */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000); ++ /* No cmdbuf object */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000); ++ NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF); ++ NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0x00000000); ++ ++ 
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001); ++ ++ /* FIXME on NV04 */ ++ if (dev_priv->card_type >= NV_10) { ++ NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0); ++ NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ ); ++ if (dev_priv->card_type >= NV_40) ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001); ++ else ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000); ++ } else { ++ NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0); ++ NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ ); ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000); ++ } ++ ++ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ return 0; ++} ++ ++static int ++nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct mem_block *pb = chan->pushbuf_mem; ++ struct nouveau_gpuobj *pushbuf = NULL; ++ int ret; ++ ++ if (pb->flags & NOUVEAU_MEM_AGP) { ++ ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size, ++ NV_DMA_ACCESS_RO, ++ &pushbuf, ++ &chan->pushbuf_base); ++ } else ++ if (pb->flags & NOUVEAU_MEM_PCI) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ pb->start, pb->size, ++ NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_PCI_NONLINEAR, ++ &pushbuf); ++ chan->pushbuf_base = 0; ++ } else if (dev_priv->card_type != NV_04) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ pb->start, pb->size, ++ NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_VIDMEM, &pushbuf); ++ chan->pushbuf_base = 0; ++ } else { ++ /* NV04 cmdbuf hack, from original ddx.. not sure of it's ++ * exact reason for existing :) PCI access to cmdbuf in ++ * VRAM. 
++ */ ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ pb->start + ++ drm_get_resource_start(dev, 1), ++ pb->size, NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_PCI, &pushbuf); ++ chan->pushbuf_base = 0; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, ++ &chan->pushbuf))) { ++ DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret); ++ if (pushbuf != dev_priv->gart_info.sg_ctxdma) ++ nouveau_gpuobj_del(dev, &pushbuf); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static struct mem_block * ++nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_config *config = &dev_priv->config; ++ struct mem_block *pb; ++ int pb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); ++ ++ /* Defaults for unconfigured values */ ++ if (!config->cmdbuf.location) ++ config->cmdbuf.location = NOUVEAU_MEM_FB; ++ if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size) ++ config->cmdbuf.size = pb_min_size; ++ ++ pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size, ++ config->cmdbuf.location | NOUVEAU_MEM_MAPPED, ++ (struct drm_file *)-2); ++ if (!pb) ++ DRM_ERROR("Couldn't allocate DMA push buffer.\n"); ++ ++ return pb; ++} ++ ++/* allocates and initializes a fifo for user space consumption */ ++int ++nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, ++ struct drm_file *file_priv, struct mem_block *pushbuf, ++ uint32_t vram_handle, uint32_t tt_handle) ++{ ++ int ret; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_channel *chan; ++ int channel; ++ ++ /* ++ * Alright, here is the full story ++ * Nvidia cards have multiple hw fifo contexts (praise them for that, ++ * no complicated crash-prone context switches) ++ * We allocate a new context for each app and let it write to it directly ++ * (woo, full userspace command submission !) 
++ * When there are no more contexts, you lost ++ */ ++ for (channel = 0; channel < engine->fifo.channels; channel++) { ++ if (dev_priv->fifos[channel] == NULL) ++ break; ++ } ++ ++ /* no more fifos. you lost. */ ++ if (channel == engine->fifo.channels) ++ return -EINVAL; ++ ++ dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel), ++ DRM_MEM_DRIVER); ++ if (!dev_priv->fifos[channel]) ++ return -ENOMEM; ++ dev_priv->fifo_alloc_count++; ++ chan = dev_priv->fifos[channel]; ++ chan->dev = dev; ++ chan->id = channel; ++ chan->file_priv = file_priv; ++ chan->pushbuf_mem = pushbuf; ++ ++ DRM_INFO("Allocating FIFO number %d\n", channel); ++ ++ /* Locate channel's user control regs */ ++ if (dev_priv->card_type < NV_40) { ++ chan->user = NV03_USER(channel); ++ chan->user_size = NV03_USER_SIZE; ++ chan->put = NV03_USER_DMA_PUT(channel); ++ chan->get = NV03_USER_DMA_GET(channel); ++ chan->ref_cnt = NV03_USER_REF_CNT(channel); ++ } else ++ if (dev_priv->card_type < NV_50) { ++ chan->user = NV40_USER(channel); ++ chan->user_size = NV40_USER_SIZE; ++ chan->put = NV40_USER_DMA_PUT(channel); ++ chan->get = NV40_USER_DMA_GET(channel); ++ chan->ref_cnt = NV40_USER_REF_CNT(channel); ++ } else { ++ chan->user = NV50_USER(channel); ++ chan->user_size = NV50_USER_SIZE; ++ chan->put = NV50_USER_DMA_PUT(channel); ++ chan->get = NV50_USER_DMA_GET(channel); ++ chan->ref_cnt = NV50_USER_REF_CNT(channel); ++ } ++ ++ /* Allocate space for per-channel fixed notifier memory */ ++ ret = nouveau_notifier_init_channel(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* Setup channel's default objects */ ++ ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* Create a dma object for the push buffer */ ++ ret = nouveau_fifo_pushbuf_ctxdma_init(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ nouveau_wait_for_idle(dev); ++ ++ /* disable the fifo 
caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1)); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ /* Create a graphics context for new channel */ ++ ret = engine->graph.create_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* Construct inital RAMFC for new channel */ ++ ret = engine->fifo.create_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* setup channel's default get/put values ++ * XXX: quite possibly extremely pointless.. ++ */ ++ NV_WRITE(chan->get, chan->pushbuf_base); ++ NV_WRITE(chan->put, chan->pushbuf_base); ++ ++ /* If this is the first channel, setup PFIFO ourselves. For any ++ * other case, the GPU will handle this when it switches contexts. ++ */ ++ if (dev_priv->fifo_alloc_count == 1) { ++ ret = engine->fifo.load_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ ret = engine->graph.load_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ } ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001); ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 1); ++ ++ DRM_INFO("%s: initialised FIFO %d\n", __func__, channel); ++ *chan_ret = chan; ++ return 0; ++} ++ ++int ++nouveau_channel_idle(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint32_t caches; ++ int idle; ++ ++ caches = NV_READ(NV03_PFIFO_CACHES); ++ NV_WRITE(NV03_PFIFO_CACHES, caches & ~1); ++ ++ if (engine->fifo.channel_id(dev) != chan->id) { ++ struct nouveau_gpuobj *ramfc = 
chan->ramfc->gpuobj; ++ ++ if (INSTANCE_RD(ramfc, 0) != INSTANCE_RD(ramfc, 1)) ++ idle = 0; ++ else ++ idle = 1; ++ } else { ++ idle = (NV_READ(NV04_PFIFO_CACHE1_DMA_GET) == ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ } ++ ++ NV_WRITE(NV03_PFIFO_CACHES, caches); ++ return idle; ++} ++ ++/* stops a fifo */ ++void nouveau_fifo_free(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint64_t t_start; ++ ++ DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id); ++ ++ /* Give the channel a chance to idle, wait 2s (hopefully) */ ++ t_start = engine->timer.read(dev); ++ while (!nouveau_channel_idle(chan)) { ++ if (engine->timer.read(dev) - t_start > 2000000000ULL) { ++ DRM_ERROR("Failed to idle channel %d before destroy." ++ "Prepare for strangeness..\n", chan->id); ++ break; ++ } ++ } ++ ++ /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched ++ * from CACHE1 too? 
++ */ ++ ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1)); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ // FIXME XXX needs more code ++ ++ engine->fifo.destroy_context(chan); ++ ++ /* Cleanup PGRAPH state */ ++ engine->graph.destroy_context(chan); ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ ++ /* Deallocate push buffer */ ++ nouveau_gpuobj_ref_del(dev, &chan->pushbuf); ++ if (chan->pushbuf_mem) { ++ nouveau_mem_free(dev, chan->pushbuf_mem); ++ chan->pushbuf_mem = NULL; ++ } ++ ++ /* Destroy objects belonging to the channel */ ++ nouveau_gpuobj_channel_takedown(chan); ++ ++ nouveau_notifier_takedown_channel(chan); ++ ++ dev_priv->fifos[chan->id] = NULL; ++ dev_priv->fifo_alloc_count--; ++ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); ++} ++ ++/* cleanups all the fifos from file_priv */ ++void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int i; ++ ++ DRM_DEBUG("clearing FIFO enables from file_priv\n"); ++ for(i = 0; i < engine->fifo.channels; i++) { ++ struct nouveau_channel *chan = dev_priv->fifos[i]; ++ ++ if (chan && chan->file_priv == file_priv) ++ nouveau_fifo_free(chan); ++ } ++} ++ ++int ++nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv, ++ int channel) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ if (channel >= engine->fifo.channels) ++ return 0; ++ if (dev_priv->fifos[channel] == NULL) ++ return 0; ++ return 
(dev_priv->fifos[channel]->file_priv == file_priv); ++} ++ ++/*********************************** ++ * ioctls wrapping the functions ++ ***********************************/ ++ ++static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_channel_alloc *init = data; ++ struct drm_map_list *entry; ++ struct nouveau_channel *chan; ++ struct mem_block *pushbuf; ++ int res; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) ++ return -EINVAL; ++ ++ pushbuf = nouveau_fifo_user_pushbuf_alloc(dev); ++ if (!pushbuf) ++ return -ENOMEM; ++ ++ res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf, ++ init->fb_ctxdma_handle, ++ init->tt_ctxdma_handle); ++ if (res) ++ return res; ++ init->channel = chan->id; ++ init->put_base = chan->pushbuf_base; ++ ++ /* make the fifo available to user space */ ++ /* first, the fifo control regs */ ++ init->ctrl = dev_priv->mmio->offset + chan->user; ++ init->ctrl_size = chan->user_size; ++ res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS, ++ 0, &chan->regs); ++ if (res != 0) ++ return res; ++ ++ entry = drm_find_matching_map(dev, chan->regs); ++ if (!entry) ++ return -EINVAL; ++ init->ctrl = entry->user_token; ++ ++ /* pass back FIFO map info to the caller */ ++ init->cmdbuf = chan->pushbuf_mem->map_handle; ++ init->cmdbuf_size = chan->pushbuf_mem->size; ++ ++ /* and the notifier block */ ++ init->notifier = chan->notifier_block->map_handle; ++ init->notifier_size = chan->notifier_block->size; ++ ++ return 0; ++} ++ ++static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_channel_free *cfree = data; ++ struct nouveau_channel *chan; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); ++ ++ 
nouveau_fifo_free(chan); ++ return 0; ++} ++ ++/*********************************** ++ * finally, the ioctl table ++ ***********************************/ ++ ++struct drm_ioctl_desc nouveau_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE, nouveau_ioctl_mem_tile, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_SUSPEND, nouveau_ioctl_suspend, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_RESUME, nouveau_ioctl_resume, DRM_AUTH), ++}; ++ ++int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); +diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c +new file mode 100644 +index 0000000..4f53a50 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c +@@ -0,0 +1,68 @@ ++/** ++ * \file mga_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the MGA DRM. ++ * ++ * \author Dave Airlie with code from patches by Egbert Eich ++ * ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * Copyright (C) Egbert Eich 2003,2004 ++ * Copyright (C) Dave Airlie 2005 ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ */ ++ ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++ ++#include "nouveau_drm.h" ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. 
++ */ ++long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c +new file mode 100644 +index 0000000..2a3d8a0 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_irq.c +@@ -0,0 +1,568 @@ ++/* ++ * Copyright (C) 2006 Ben Skeggs. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++/* ++ * Authors: ++ * Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_reg.h" ++#include "nouveau_swmthd.h" ++ ++void ++nouveau_irq_preinstall(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Master disable */ ++ NV_WRITE(NV03_PMC_INTR_EN_0, 0); ++} ++ ++int ++nouveau_irq_postinstall(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Master enable */ ++ NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); ++ ++ return 0; ++} ++ ++void ++nouveau_irq_uninstall(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Master disable */ ++ NV_WRITE(NV03_PMC_INTR_EN_0, 0); ++} ++ ++static void ++nouveau_fifo_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint32_t status, reassign; ++ ++ reassign = NV_READ(NV03_PFIFO_CACHES) & 1; ++ while ((status = NV_READ(NV03_PFIFO_INTR_0))) { ++ uint32_t chid, get; ++ ++ NV_WRITE(NV03_PFIFO_CACHES, 0); ++ ++ chid = engine->fifo.channel_id(dev); ++ get = NV_READ(NV03_PFIFO_CACHE1_GET); ++ ++ if (status & NV_PFIFO_INTR_CACHE_ERROR) { ++ uint32_t mthd, data; ++ int ptr; ++ ++ ptr = get >> 2; ++ if (dev_priv->card_type < NV_40) { ++ mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr)); ++ data = NV_READ(NV04_PFIFO_CACHE1_DATA(ptr)); ++ } else { ++ mthd = NV_READ(NV40_PFIFO_CACHE1_METHOD(ptr)); ++ data = NV_READ(NV40_PFIFO_CACHE1_DATA(ptr)); ++ } ++ ++ DRM_INFO("PFIFO_CACHE_ERROR - " ++ "Ch %d/%d Mthd 0x%04x Data 0x%08x\n", ++ chid, (mthd >> 13) & 7, mthd & 0x1ffc, data); ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_GET, get + 4); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 1); ++ ++ status &= ~NV_PFIFO_INTR_CACHE_ERROR; ++ NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); ++ } ++ ++ if (status & 
NV_PFIFO_INTR_DMA_PUSHER) { ++ DRM_INFO("PFIFO_DMA_PUSHER - Ch %d\n", chid); ++ ++ status &= ~NV_PFIFO_INTR_DMA_PUSHER; ++ NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); ++ if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT) != get) ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, get + 4); ++ } ++ ++ if (status) { ++ DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status); ++ NV_WRITE(NV03_PFIFO_INTR_0, status); ++ NV_WRITE(NV03_PMC_INTR_EN_0, 0); ++ } ++ ++ NV_WRITE(NV03_PFIFO_CACHES, reassign); ++ } ++ ++ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); ++} ++ ++struct nouveau_bitfield_names { ++ uint32_t mask; ++ const char * name; ++}; ++ ++static struct nouveau_bitfield_names nouveau_nstatus_names[] = ++{ ++ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, ++ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, ++ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, ++ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } ++}; ++ ++static struct nouveau_bitfield_names nouveau_nstatus_names_nv10[] = ++{ ++ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, ++ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, ++ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, ++ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } ++}; ++ ++static struct nouveau_bitfield_names nouveau_nsource_names[] = ++{ ++ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, ++ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, ++ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, ++ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, ++ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, ++ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, ++ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, ++ 
{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, ++ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, ++ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, ++ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, ++ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, ++ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, ++}; ++ ++static void ++nouveau_print_bitfield_names(uint32_t value, ++ const struct nouveau_bitfield_names *namelist, ++ const int namelist_len) ++{ ++ int i; ++ for(i=0; idev_private; ++ uint32_t inst; ++ int i; ++ ++ if (dev_priv->card_type < NV_40) ++ return dev_priv->Engine.fifo.channels; ++ else ++ if (dev_priv->card_type < NV_50) ++ inst = (NV_READ(0x40032c) & 0xfffff) << 4; ++ else ++ inst = NV_READ(0x40032c) & 0xfffff; ++ ++ for (i = 0; i < dev_priv->Engine.fifo.channels; i++) { ++ struct nouveau_channel *chan = dev_priv->fifos[i]; ++ ++ if (!chan || !chan->ramin_grctx) ++ continue; ++ ++ if (dev_priv->card_type < NV_50) { ++ if (inst == chan->ramin_grctx->instance) ++ break; ++ } else { ++ if (inst == INSTANCE_RD(chan->ramin_grctx->gpuobj, 0)) ++ break; ++ } ++ } ++ ++ return i; ++} ++ ++static int ++nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int channel; ++ ++ if (dev_priv->card_type < NV_10) ++ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; ++ else ++ if (dev_priv->card_type < NV_40) ++ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; ++ else ++ channel = nouveau_graph_chid_from_grctx(dev); ++ ++ if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) { ++ DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel); ++ return -EINVAL; ++ } ++ ++ 
*channel_ret = channel; ++ return 0; ++} ++ ++struct nouveau_pgraph_trap { ++ int channel; ++ int class; ++ int subc, mthd, size; ++ uint32_t data, data2; ++ uint32_t nsource, nstatus; ++}; ++ ++static void ++nouveau_graph_trap_info(struct drm_device *dev, ++ struct nouveau_pgraph_trap *trap) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t address; ++ ++ trap->nsource = trap->nstatus = 0; ++ if (dev_priv->card_type < NV_50) { ++ trap->nsource = NV_READ(NV03_PGRAPH_NSOURCE); ++ trap->nstatus = NV_READ(NV03_PGRAPH_NSTATUS); ++ } ++ ++ if (nouveau_graph_trapped_channel(dev, &trap->channel)) ++ trap->channel = -1; ++ address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR); ++ ++ trap->mthd = address & 0x1FFC; ++ trap->data = NV_READ(NV04_PGRAPH_TRAPPED_DATA); ++ if (dev_priv->card_type < NV_10) { ++ trap->subc = (address >> 13) & 0x7; ++ } else { ++ trap->subc = (address >> 16) & 0x7; ++ trap->data2 = NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH); ++ } ++ ++ if (dev_priv->card_type < NV_10) { ++ trap->class = NV_READ(0x400180 + trap->subc*4) & 0xFF; ++ } else if (dev_priv->card_type < NV_40) { ++ trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFF; ++ } else if (dev_priv->card_type < NV_50) { ++ trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFFF; ++ } else { ++ trap->class = NV_READ(0x400814); ++ } ++} ++ ++static void ++nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, ++ struct nouveau_pgraph_trap *trap) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t nsource = trap->nsource, nstatus = trap->nstatus; ++ ++ DRM_INFO("%s - nSource:", id); ++ nouveau_print_bitfield_names(nsource, nouveau_nsource_names, ++ ARRAY_SIZE(nouveau_nsource_names)); ++ printk(", nStatus:"); ++ if (dev_priv->card_type < NV_10) ++ nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names, ++ ARRAY_SIZE(nouveau_nstatus_names)); ++ else ++ nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names_nv10, ++ 
ARRAY_SIZE(nouveau_nstatus_names_nv10)); ++ printk("\n"); ++ ++ DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n", ++ id, trap->channel, trap->subc, trap->class, trap->mthd, ++ trap->data2, trap->data); ++} ++ ++static inline void ++nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) ++{ ++ struct nouveau_pgraph_trap trap; ++ int unhandled = 0; ++ ++ nouveau_graph_trap_info(dev, &trap); ++ ++ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ /* NV4 (nvidia TNT 1) reports software methods with ++ * PGRAPH NOTIFY ILLEGAL_MTHD ++ */ ++ DRM_DEBUG("Got NV04 software method method %x for class %#x\n", ++ trap.mthd, trap.class); ++ ++ if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) { ++ DRM_ERROR("Unable to execute NV04 software method %x " ++ "for object class %x. Please report.\n", ++ trap.mthd, trap.class); ++ unhandled = 1; ++ } ++ } else { ++ unhandled = 1; ++ } ++ ++ if (unhandled) ++ nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); ++} ++ ++static inline void ++nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) ++{ ++ struct nouveau_pgraph_trap trap; ++ int unhandled = 0; ++ ++ nouveau_graph_trap_info(dev, &trap); ++ trap.nsource = nsource; ++ ++ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ if (trap.channel >= 0 && trap.mthd == 0x0150) { ++ nouveau_fence_handler(dev, trap.channel); ++ } else ++ if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) { ++ unhandled = 1; ++ } ++ } else { ++ unhandled = 1; ++ } ++ ++ if (unhandled) ++ nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap); ++} ++ ++static inline void ++nouveau_pgraph_intr_context_switch(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint32_t chid; ++ ++ chid = engine->fifo.channel_id(dev); ++ DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid); ++ ++ switch(dev_priv->card_type) { ++ case NV_04: ++ case 
NV_05: ++ nouveau_nv04_context_switch(dev); ++ break; ++ case NV_10: ++ case NV_11: ++ case NV_17: ++ nouveau_nv10_context_switch(dev); ++ break; ++ default: ++ DRM_ERROR("Context switch not implemented\n"); ++ break; ++ } ++} ++ ++static void ++nouveau_pgraph_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t status; ++ ++ while ((status = NV_READ(NV03_PGRAPH_INTR))) { ++ uint32_t nsource = NV_READ(NV03_PGRAPH_NSOURCE); ++ ++ if (status & NV_PGRAPH_INTR_NOTIFY) { ++ nouveau_pgraph_intr_notify(dev, nsource); ++ ++ status &= ~NV_PGRAPH_INTR_NOTIFY; ++ NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); ++ } ++ ++ if (status & NV_PGRAPH_INTR_ERROR) { ++ nouveau_pgraph_intr_error(dev, nsource); ++ ++ status &= ~NV_PGRAPH_INTR_ERROR; ++ NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); ++ } ++ ++ if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { ++ nouveau_pgraph_intr_context_switch(dev); ++ ++ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; ++ NV_WRITE(NV03_PGRAPH_INTR, ++ NV_PGRAPH_INTR_CONTEXT_SWITCH); ++ } ++ ++ if (status) { ++ DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status); ++ NV_WRITE(NV03_PGRAPH_INTR, status); ++ } ++ ++ if ((NV_READ(NV04_PGRAPH_FIFO) & (1 << 0)) == 0) ++ NV_WRITE(NV04_PGRAPH_FIFO, 1); ++ } ++ ++ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); ++} ++ ++static void ++nv50_pgraph_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t status; ++ ++ status = NV_READ(NV03_PGRAPH_INTR); ++ ++ if (status & 0x00000020) { ++ nouveau_pgraph_intr_error(dev, ++ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); ++ ++ status &= ~0x00000020; ++ NV_WRITE(NV03_PGRAPH_INTR, 0x00000020); ++ } ++ ++ if (status & 0x00100000) { ++ nouveau_pgraph_intr_error(dev, ++ NV03_PGRAPH_NSOURCE_DATA_ERROR); ++ ++ status &= ~0x00100000; ++ NV_WRITE(NV03_PGRAPH_INTR, 0x00100000); ++ } ++ ++ if (status & 0x00200000) { ++ nouveau_pgraph_intr_error(dev, ++ 
NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); ++ ++ status &= ~0x00200000; ++ NV_WRITE(NV03_PGRAPH_INTR, 0x00200000); ++ } ++ ++ if (status) { ++ DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status); ++ NV_WRITE(NV03_PGRAPH_INTR, status); ++ } ++ ++ { ++ const int isb = (1 << 16) | (1 << 0); ++ ++ if ((NV_READ(0x400500) & isb) != isb) ++ NV_WRITE(0x400500, NV_READ(0x400500) | isb); ++ } ++ ++ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); ++} ++ ++static void ++nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (crtc&1) { ++ NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); ++ } ++ ++ if (crtc&2) { ++ NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); ++ } ++} ++ ++static void ++nouveau_nv50_display_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t val = NV_READ(NV50_DISPLAY_SUPERVISOR); ++ ++ DRM_INFO("NV50_DISPLAY_INTR - 0x%08X\n", val); ++ ++ NV_WRITE(NV50_DISPLAY_SUPERVISOR, val); ++} ++ ++static void ++nouveau_nv50_i2c_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_INFO("NV50_I2C_INTR - 0x%08X\n", NV_READ(NV50_I2C_CONTROLLER)); ++ ++ /* This seems to be the way to acknowledge an interrupt. 
*/ ++ NV_WRITE(NV50_I2C_CONTROLLER, 0x7FFF7FFF); ++} ++ ++irqreturn_t ++nouveau_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device*)arg; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t status; ++ ++ status = NV_READ(NV03_PMC_INTR_0); ++ if (!status) ++ return IRQ_NONE; ++ ++ if (status & NV_PMC_INTR_0_PFIFO_PENDING) { ++ nouveau_fifo_irq_handler(dev); ++ status &= ~NV_PMC_INTR_0_PFIFO_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { ++ if (dev_priv->card_type >= NV_50) ++ nv50_pgraph_irq_handler(dev); ++ else ++ nouveau_pgraph_irq_handler(dev); ++ ++ status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_CRTCn_PENDING) { ++ nouveau_crtc_irq_handler(dev, (status>>24)&3); ++ status &= ~NV_PMC_INTR_0_CRTCn_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) { ++ nouveau_nv50_display_irq_handler(dev); ++ status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_NV50_I2C_PENDING) { ++ nouveau_nv50_i2c_irq_handler(dev); ++ status &= ~NV_PMC_INTR_0_NV50_I2C_PENDING; ++ } ++ ++ if (status) ++ DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status); ++ ++ return IRQ_HANDLED; ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c +new file mode 100644 +index 0000000..5e7ac9e +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_mem.c +@@ -0,0 +1,868 @@ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * Copyright 2005 Stephane Marchesin ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Keith Whitwell ++ */ ++ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_sarea.h" ++#include "nouveau_drv.h" ++ ++static struct mem_block * ++split_block(struct mem_block *p, uint64_t start, uint64_t size, ++ struct drm_file *file_priv) ++{ ++ /* Maybe cut off the start of an existing block */ ++ if (start > p->start) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start; ++ newblock->size = p->size - (start - p->start); ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size -= newblock->size; ++ p = newblock; ++ } ++ ++ /* Maybe cut off the end of an existing block */ ++ if (size < p->size) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start + size; ++ newblock->size = p->size - size; ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size = size; ++ } ++ ++out: ++ /* Our block is in the middle */ ++ p->file_priv = file_priv; ++ return p; ++} ++ ++struct mem_block * ++nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, ++ int align2, struct drm_file *file_priv, int tail) ++{ ++ struct mem_block *p; ++ uint64_t mask = (1 << align2) - 1; ++ ++ if (!heap) ++ return NULL; ++ ++ if (tail) { ++ list_for_each_prev(p, heap) { ++ uint64_t start = ((p->start + p->size) - size) & ~mask; ++ ++ if (p->file_priv == 0 && start >= p->start && ++ start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ } else { ++ list_for_each(p, heap) { ++ uint64_t start = (p->start + mask) & ~mask; ++ ++ if (p->file_priv == 0 && ++ start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ } ++ ++ return NULL; ++} ++ ++static struct 
mem_block *find_block(struct mem_block *heap, uint64_t start) ++{ ++ struct mem_block *p; ++ ++ list_for_each(p, heap) ++ if (p->start == start) ++ return p; ++ ++ return NULL; ++} ++ ++void nouveau_mem_free_block(struct mem_block *p) ++{ ++ p->file_priv = NULL; ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ if (p->next->file_priv == 0) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFS); ++ } ++ ++ if (p->prev->file_priv == 0) { ++ struct mem_block *q = p->prev; ++ q->size += p->size; ++ q->next = p->next; ++ q->next->prev = q; ++ drm_free(p, sizeof(*q), DRM_MEM_BUFS); ++ } ++} ++ ++/* Initialize. How to check for an uninitialized heap? ++ */ ++int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, ++ uint64_t size) ++{ ++ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); ++ ++ if (!blocks) ++ return -ENOMEM; ++ ++ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); ++ if (!*heap) { ++ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ ++ blocks->start = start; ++ blocks->size = size; ++ blocks->file_priv = NULL; ++ blocks->next = blocks->prev = *heap; ++ ++ memset(*heap, 0, sizeof(**heap)); ++ (*heap)->file_priv = (struct drm_file *) - 1; ++ (*heap)->next = (*heap)->prev = blocks; ++ return 0; ++} ++ ++/* ++ * Free all blocks associated with the releasing file_priv ++ */ ++void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) ++{ ++ struct mem_block *p; ++ ++ if (!heap || !heap->next) ++ return; ++ ++ list_for_each(p, heap) { ++ if (p->file_priv == file_priv) ++ p->file_priv = NULL; ++ } ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. 
++ */ ++ list_for_each(p, heap) { ++ while ((p->file_priv == 0) && (p->next->file_priv == 0) && ++ (p->next!=heap)) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ } ++} ++ ++/* ++ * Cleanup everything ++ */ ++void nouveau_mem_takedown(struct mem_block **heap) ++{ ++ struct mem_block *p; ++ ++ if (!*heap) ++ return; ++ ++ for (p = (*heap)->next; p != *heap;) { ++ struct mem_block *q = p; ++ p = p->next; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ ++ drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); ++ *heap = NULL; ++} ++ ++void nouveau_mem_close(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nouveau_mem_takedown(&dev_priv->agp_heap); ++ nouveau_mem_takedown(&dev_priv->fb_heap); ++ if (dev_priv->pci_heap) ++ nouveau_mem_takedown(&dev_priv->pci_heap); ++} ++ ++/*XXX won't work on BSD because of pci_read_config_dword */ ++static uint32_t ++nouveau_mem_fb_amount_igp(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct pci_dev *bridge; ++ uint32_t mem; ++ ++ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1)); ++ if (!bridge) { ++ DRM_ERROR("no bridge device\n"); ++ return 0; ++ } ++ ++ if (dev_priv->flags&NV_NFORCE) { ++ pci_read_config_dword(bridge, 0x7C, &mem); ++ return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; ++ } else ++ if(dev_priv->flags&NV_NFORCE2) { ++ pci_read_config_dword(bridge, 0x84, &mem); ++ return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; ++ } ++ ++ DRM_ERROR("impossible!\n"); ++ ++ return 0; ++} ++ ++/* returns the amount of FB ram in bytes */ ++uint64_t nouveau_mem_fb_amount(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ switch(dev_priv->card_type) ++ { ++ case NV_04: ++ case NV_05: ++ if (NV_READ(NV03_BOOT_0) & 0x00000100) { ++ return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024; ++ } else ++ 
switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) ++ { ++ case NV04_BOOT_0_RAM_AMOUNT_32MB: ++ return 32*1024*1024; ++ case NV04_BOOT_0_RAM_AMOUNT_16MB: ++ return 16*1024*1024; ++ case NV04_BOOT_0_RAM_AMOUNT_8MB: ++ return 8*1024*1024; ++ case NV04_BOOT_0_RAM_AMOUNT_4MB: ++ return 4*1024*1024; ++ } ++ break; ++ case NV_10: ++ case NV_11: ++ case NV_17: ++ case NV_20: ++ case NV_30: ++ case NV_40: ++ case NV_44: ++ case NV_50: ++ default: ++ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { ++ return nouveau_mem_fb_amount_igp(dev); ++ } else { ++ uint64_t mem; ++ ++ mem = (NV_READ(NV04_FIFO_DATA) & ++ NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> ++ NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; ++ return mem*1024*1024; ++ } ++ break; ++ } ++ ++ DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n"); ++ return 0; ++} ++ ++static void nouveau_mem_reset_agp(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable; ++ ++ saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1); ++ saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19); ++ ++ /* clear busmaster bit */ ++ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4); ++ /* clear SBA and AGP bits */ ++ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff); ++ ++ /* power cycle pgraph, if enabled */ ++ pmc_enable = NV_READ(NV03_PMC_ENABLE); ++ if (pmc_enable & NV_PMC_ENABLE_PGRAPH) { ++ NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ } ++ ++ /* and restore (gives effect of resetting AGP) */ ++ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19); ++ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1); ++} ++ ++static int ++nouveau_mem_init_agp(struct drm_device *dev, int ttm) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_agp_info info; ++ struct drm_agp_mode mode; ++ int ret; ++ ++ 
nouveau_mem_reset_agp(dev); ++ ++ ret = drm_agp_acquire(dev); ++ if (ret) { ++ DRM_ERROR("Unable to acquire AGP: %d\n", ret); ++ return ret; ++ } ++ ++ ret = drm_agp_info(dev, &info); ++ if (ret) { ++ DRM_ERROR("Unable to get AGP info: %d\n", ret); ++ return ret; ++ } ++ ++ /* see agp.h for the AGPSTAT_* modes available */ ++ mode.mode = info.mode; ++ ret = drm_agp_enable(dev, mode); ++ if (ret) { ++ DRM_ERROR("Unable to enable AGP: %d\n", ret); ++ return ret; ++ } ++ ++ if (!ttm) { ++ struct drm_agp_buffer agp_req; ++ struct drm_agp_binding bind_req; ++ ++ agp_req.size = info.aperture_size; ++ agp_req.type = 0; ++ ret = drm_agp_alloc(dev, &agp_req); ++ if (ret) { ++ DRM_ERROR("Unable to alloc AGP: %d\n", ret); ++ return ret; ++ } ++ ++ bind_req.handle = agp_req.handle; ++ bind_req.offset = 0; ++ ret = drm_agp_bind(dev, &bind_req); ++ if (ret) { ++ DRM_ERROR("Unable to bind AGP: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ dev_priv->gart_info.type = NOUVEAU_GART_AGP; ++ dev_priv->gart_info.aper_base = info.aperture_base; ++ dev_priv->gart_info.aper_size = info.aperture_size; ++ return 0; ++} ++ ++#define HACK_OLD_MM ++int ++nouveau_mem_init_ttm(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t vram_size, bar1_size; ++ int ret; ++ ++ dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; ++ dev_priv->fb_phys = drm_get_resource_start(dev,1); ++ dev_priv->gart_info.type = NOUVEAU_GART_NONE; ++ ++ drm_bo_driver_init(dev); ++ ++ /* non-mappable vram */ ++ dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); ++ dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; ++ vram_size = dev_priv->fb_available_size >> PAGE_SHIFT; ++ bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT; ++ if (bar1_size < vram_size) { ++ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0, ++ bar1_size, vram_size - bar1_size, 1))) { ++ DRM_ERROR("Failed PRIV0 mm init: %d\n", ret); ++ return ret; ++ } ++ vram_size = bar1_size; ++ 
} ++ ++ /* mappable vram */ ++#ifdef HACK_OLD_MM ++ vram_size /= 4; ++#endif ++ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) { ++ DRM_ERROR("Failed VRAM mm init: %d\n", ret); ++ return ret; ++ } ++ ++ /* GART */ ++#if !defined(__powerpc__) && !defined(__ia64__) ++ if (drm_device_is_agp(dev) && dev->agp) { ++ if ((ret = nouveau_mem_init_agp(dev, 1))) ++ DRM_ERROR("Error initialising AGP: %d\n", ret); ++ } ++#endif ++ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { ++ if ((ret = nouveau_sgdma_init(dev))) ++ DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret); ++ } ++ ++ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, ++ dev_priv->gart_info.aper_size >> ++ PAGE_SHIFT, 1))) { ++ DRM_ERROR("Failed TT mm init: %d\n", ret); ++ return ret; ++ } ++ ++#ifdef HACK_OLD_MM ++ vram_size <<= PAGE_SHIFT; ++ DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10); ++ if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3)) ++ return -ENOMEM; ++#endif ++ ++ return 0; ++} ++ ++int nouveau_mem_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t fb_size; ++ int ret = 0; ++ ++ dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; ++ dev_priv->fb_phys = 0; ++ dev_priv->gart_info.type = NOUVEAU_GART_NONE; ++ ++ /* setup a mtrr over the FB */ ++ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), ++ nouveau_mem_fb_amount(dev), ++ DRM_MTRR_WC); ++ ++ /* Init FB */ ++ dev_priv->fb_phys=drm_get_resource_start(dev,1); ++ fb_size = nouveau_mem_fb_amount(dev); ++ /* On G80, limit VRAM to 512MiB temporarily due to limits in how ++ * we handle VRAM page tables. ++ */ ++ if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024)) ++ fb_size = (512 * 1024 * 1024); ++ /* On at least NV40, RAMIN is actually at the end of vram. ++ * We don't want to allocate this... 
*/ ++ if (dev_priv->card_type >= NV_40) ++ fb_size -= dev_priv->ramin_rsvd_vram; ++ dev_priv->fb_available_size = fb_size; ++ DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10); ++ ++ if (fb_size>256*1024*1024) { ++ /* On cards with > 256Mb, you can't map everything. ++ * So we create a second FB heap for that type of memory */ ++ if (nouveau_mem_init_heap(&dev_priv->fb_heap, ++ 0, 256*1024*1024)) ++ return -ENOMEM; ++ if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap, ++ 256*1024*1024, fb_size-256*1024*1024)) ++ return -ENOMEM; ++ } else { ++ if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size)) ++ return -ENOMEM; ++ dev_priv->fb_nomap_heap=NULL; ++ } ++ ++#if !defined(__powerpc__) && !defined(__ia64__) ++ /* Init AGP / NV50 PCIEGART */ ++ if (drm_device_is_agp(dev) && dev->agp) { ++ if ((ret = nouveau_mem_init_agp(dev, 0))) ++ DRM_ERROR("Error initialising AGP: %d\n", ret); ++ } ++#endif ++ ++ /*Note: this is *not* just NV50 code, but only used on NV50 for now */ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && ++ dev_priv->card_type >= NV_50) { ++ ret = nouveau_sgdma_init(dev); ++ if (!ret) { ++ ret = nouveau_sgdma_nottm_hack_init(dev); ++ if (ret) ++ nouveau_sgdma_takedown(dev); ++ } ++ ++ if (ret) ++ DRM_ERROR("Error initialising SG DMA: %d\n", ret); ++ } ++ ++ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { ++ if (nouveau_mem_init_heap(&dev_priv->agp_heap, ++ 0, dev_priv->gart_info.aper_size)) { ++ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { ++ nouveau_sgdma_nottm_hack_takedown(dev); ++ nouveau_sgdma_takedown(dev); ++ } ++ } ++ } ++ ++ /* NV04-NV40 PCIEGART */ ++ if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) { ++ struct drm_scatter_gather sgreq; ++ ++ DRM_DEBUG("Allocating sg memory for PCI DMA\n"); ++ sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone ++ ++ if (drm_sg_alloc(dev, &sgreq)) { ++ DRM_ERROR("Unable to allocate %ldMB of scatter-gather" ++ " pages for PCI DMA!",sgreq.size>>20); ++ } else { ++ if 
(nouveau_mem_init_heap(&dev_priv->pci_heap, 0, ++ dev->sg->pages * PAGE_SIZE)) { ++ DRM_ERROR("Unable to initialize pci_heap!"); ++ } ++ } ++ } ++ ++ /* G8x: Allocate shared page table to map real VRAM pages into */ ++ if (dev_priv->card_type >= NV_50) { ++ unsigned size = ((512 * 1024 * 1024) / 65536) * 8; ++ ++ ret = nouveau_gpuobj_new(dev, NULL, size, 0, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ALLOW_NO_REFS, ++ &dev_priv->vm_vram_pt); ++ if (ret) { ++ DRM_ERROR("Error creating VRAM page table: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ ++ return 0; ++} ++ ++struct mem_block * ++nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, ++ int flags, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct mem_block *block; ++ int type, tail = !(flags & NOUVEAU_MEM_USER); ++ ++ /* ++ * Make things easier on ourselves: all allocations are page-aligned. ++ * We need that to map allocated regions into the user space ++ */ ++ if (alignment < PAGE_SHIFT) ++ alignment = PAGE_SHIFT; ++ ++ /* Align allocation sizes to 64KiB blocks on G8x. We use a 64KiB ++ * page size in the GPU VM. ++ */ ++ if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) { ++ size = (size + 65535) & ~65535; ++ if (alignment < 16) ++ alignment = 16; ++ } ++ ++ /* ++ * Warn about 0 sized allocations, but let it go through. 
It'll return 1 page ++ */ ++ if (size == 0) ++ DRM_INFO("warning : 0 byte allocation\n"); ++ ++ /* ++ * Keep alloc size a multiple of the page size to keep drm_addmap() happy ++ */ ++ if (size & (~PAGE_MASK)) ++ size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE; ++ ++ ++#define NOUVEAU_MEM_ALLOC_AGP {\ ++ type=NOUVEAU_MEM_AGP;\ ++ block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\ ++ alignment, file_priv, tail); \ ++ if (block) goto alloc_ok;\ ++ } ++ ++#define NOUVEAU_MEM_ALLOC_PCI {\ ++ type = NOUVEAU_MEM_PCI;\ ++ block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \ ++ alignment, file_priv, tail); \ ++ if ( block ) goto alloc_ok;\ ++ } ++ ++#define NOUVEAU_MEM_ALLOC_FB {\ ++ type=NOUVEAU_MEM_FB;\ ++ if (!(flags&NOUVEAU_MEM_MAPPED)) {\ ++ block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\ ++ size, alignment, \ ++ file_priv, tail); \ ++ if (block) goto alloc_ok;\ ++ }\ ++ block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\ ++ alignment, file_priv, tail);\ ++ if (block) goto alloc_ok;\ ++ } ++ ++ ++ if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB ++ if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP ++ if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI ++ if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB ++ if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP ++ if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI ++ ++ ++ return NULL; ++ ++alloc_ok: ++ block->flags=type; ++ ++ /* On G8x, map memory into VM */ ++ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 && ++ !(flags & NOUVEAU_MEM_NOVM)) { ++ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; ++ unsigned offset = block->start; ++ unsigned count = block->size / 65536; ++ unsigned tile = 0; ++ ++ if (!pt) { ++ DRM_ERROR("vm alloc without vm pt\n"); ++ nouveau_mem_free_block(block); ++ return NULL; ++ } ++ ++ /* The tiling stuff is *not* what NVIDIA does - but both the ++ * 2D and 3D engines seem happy with this simpler method. 
++ * Should look into why NVIDIA do what they do at some point. ++ */ ++ if (flags & NOUVEAU_MEM_TILE) { ++ if (flags & NOUVEAU_MEM_TILE_ZETA) ++ tile = 0x00002800; ++ else ++ tile = 0x00007000; ++ } ++ ++ while (count--) { ++ unsigned pte = offset / 65536; ++ ++ INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); ++ INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); ++ offset += 65536; ++ } ++ } else { ++ block->flags |= NOUVEAU_MEM_NOVM; ++ } ++ ++ if (flags&NOUVEAU_MEM_MAPPED) ++ { ++ struct drm_map_list *entry; ++ int ret = 0; ++ block->flags|=NOUVEAU_MEM_MAPPED; ++ ++ if (type == NOUVEAU_MEM_AGP) { ++ if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) ++ ret = drm_addmap(dev, block->start, block->size, ++ _DRM_AGP, 0, &block->map); ++ else ++ ret = drm_addmap(dev, block->start, block->size, ++ _DRM_SCATTER_GATHER, 0, &block->map); ++ } ++ else if (type == NOUVEAU_MEM_FB) ++ ret = drm_addmap(dev, block->start + dev_priv->fb_phys, ++ block->size, _DRM_FRAME_BUFFER, ++ 0, &block->map); ++ else if (type == NOUVEAU_MEM_PCI) ++ ret = drm_addmap(dev, block->start, block->size, ++ _DRM_SCATTER_GATHER, 0, &block->map); ++ ++ if (ret) { ++ nouveau_mem_free_block(block); ++ return NULL; ++ } ++ ++ entry = drm_find_matching_map(dev, block->map); ++ if (!entry) { ++ nouveau_mem_free_block(block); ++ return NULL; ++ } ++ block->map_handle = entry->user_token; ++ } ++ ++ DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags); ++ return block; ++} ++ ++void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags); ++ ++ if (block->flags&NOUVEAU_MEM_MAPPED) ++ drm_rmmap(dev, block->map); ++ ++ /* G8x: Remove pages from vm */ ++ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 && ++ !(block->flags & NOUVEAU_MEM_NOVM)) { ++ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; ++ 
unsigned offset = block->start; ++ unsigned count = block->size / 65536; ++ ++ if (!pt) { ++ DRM_ERROR("vm free without vm pt\n"); ++ goto out_free; ++ } ++ ++ while (count--) { ++ unsigned pte = offset / 65536; ++ INSTANCE_WR(pt, (pte * 2) + 0, 0); ++ INSTANCE_WR(pt, (pte * 2) + 1, 0); ++ offset += 65536; ++ } ++ } ++ ++out_free: ++ nouveau_mem_free_block(block); ++} ++ ++/* ++ * Ioctls ++ */ ++ ++int ++nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_mem_alloc *alloc = data; ++ struct mem_block *block; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (alloc->flags & NOUVEAU_MEM_INTERNAL) ++ return -EINVAL; ++ ++ block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size, ++ alloc->flags | NOUVEAU_MEM_USER, file_priv); ++ if (!block) ++ return -ENOMEM; ++ alloc->map_handle=block->map_handle; ++ alloc->offset=block->start; ++ alloc->flags=block->flags; ++ ++ if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB) ++ alloc->offset += 512*1024*1024; ++ ++ return 0; ++} ++ ++int ++nouveau_ioctl_mem_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_mem_free *memfree = data; ++ struct mem_block *block; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB) ++ memfree->offset -= 512*1024*1024; ++ ++ block=NULL; ++ if (memfree->flags & NOUVEAU_MEM_FB) ++ block = find_block(dev_priv->fb_heap, memfree->offset); ++ else if (memfree->flags & NOUVEAU_MEM_AGP) ++ block = find_block(dev_priv->agp_heap, memfree->offset); ++ else if (memfree->flags & NOUVEAU_MEM_PCI) ++ block = find_block(dev_priv->pci_heap, memfree->offset); ++ if (!block) ++ return -EFAULT; ++ if (block->file_priv != file_priv) ++ return -EPERM; ++ ++ nouveau_mem_free(dev, block); ++ return 0; ++} 
++ ++int ++nouveau_ioctl_mem_tile(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_mem_tile *memtile = data; ++ struct mem_block *block = NULL; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (dev_priv->card_type < NV_50) ++ return -EINVAL; ++ ++ if (memtile->flags & NOUVEAU_MEM_FB) { ++ memtile->offset -= 512*1024*1024; ++ block = find_block(dev_priv->fb_heap, memtile->offset); ++ } ++ ++ if (!block) ++ return -EINVAL; ++ ++ if (block->file_priv != file_priv) ++ return -EPERM; ++ ++ { ++ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; ++ unsigned offset = block->start + memtile->delta; ++ unsigned count = memtile->size / 65536; ++ unsigned tile = 0; ++ ++ if (memtile->flags & NOUVEAU_MEM_TILE) { ++ if (memtile->flags & NOUVEAU_MEM_TILE_ZETA) ++ tile = 0x00002800; ++ else ++ tile = 0x00007000; ++ } ++ ++ while (count--) { ++ unsigned pte = offset / 65536; ++ ++ INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); ++ INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); ++ offset += 65536; ++ } ++ } ++ ++ return 0; ++} ++ +diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c +new file mode 100644 +index 0000000..edece4d +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c +@@ -0,0 +1,165 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++int ++nouveau_notifier_init_channel(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ int flags, ret; ++ ++ flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED | ++ NOUVEAU_MEM_FB_ACCEPTABLE); ++ ++ chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, ++ (struct drm_file *)-2); ++ if (!chan->notifier_block) ++ return -ENOMEM; ++ DRM_DEBUG("Allocated notifier block in 0x%08x\n", ++ chan->notifier_block->flags); ++ ++ ret = nouveau_mem_init_heap(&chan->notifier_heap, ++ 0, chan->notifier_block->size); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++void ++nouveau_notifier_takedown_channel(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ ++ if (chan->notifier_block) { ++ nouveau_mem_free(dev, chan->notifier_block); ++ chan->notifier_block = NULL; ++ } ++ ++ nouveau_mem_takedown(&chan->notifier_heap); ++} ++ ++static void ++nouveau_notifier_gpuobj_dtor(struct drm_device *dev, ++ struct nouveau_gpuobj *gpuobj) ++{ ++ DRM_DEBUG("\n"); ++ ++ if (gpuobj->priv) ++ nouveau_mem_free_block(gpuobj->priv); ++} ++ ++int ++nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, ++ int count, uint32_t *b_offset) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *nobj = NULL; ++ struct mem_block *mem; ++ uint32_t offset; ++ int target, ret; ++ ++ if (!chan->notifier_heap) { ++ DRM_ERROR("Channel %d doesn't have a notifier heap!\n", ++ chan->id); ++ return -EINVAL; ++ } ++ ++ mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0, ++ (struct drm_file *)-2, 0); ++ if (!mem) { ++ DRM_ERROR("Channel %d notifier block full\n", chan->id); ++ return -ENOMEM; ++ } ++ mem->flags = NOUVEAU_MEM_NOTIFIER; ++ ++ offset = chan->notifier_block->start; ++ if (chan->notifier_block->flags & NOUVEAU_MEM_FB) { ++ target = NV_DMA_TARGET_VIDMEM; ++ } else ++ 
if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { ++ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA && ++ dev_priv->card_type < NV_50) { ++ ret = nouveau_sgdma_get_page(dev, offset, &offset); ++ if (ret) ++ return ret; ++ target = NV_DMA_TARGET_PCI; ++ } else { ++ target = NV_DMA_TARGET_AGP; ++ } ++ } else ++ if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) { ++ target = NV_DMA_TARGET_PCI_NONLINEAR; ++ } else { ++ DRM_ERROR("Bad DMA target, flags 0x%08x!\n", ++ chan->notifier_block->flags); ++ return -EINVAL; ++ } ++ offset += mem->start; ++ ++ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ offset, mem->size, ++ NV_DMA_ACCESS_RW, target, &nobj))) { ++ nouveau_mem_free_block(mem); ++ DRM_ERROR("Error creating notifier ctxdma: %d\n", ret); ++ return ret; ++ } ++ nobj->dtor = nouveau_notifier_gpuobj_dtor; ++ nobj->priv = mem; ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) { ++ nouveau_gpuobj_del(dev, &nobj); ++ nouveau_mem_free_block(mem); ++ DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ *b_offset = mem->start; ++ return 0; ++} ++ ++int ++nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_notifierobj_alloc *na = data; ++ struct nouveau_channel *chan; ++ int ret; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); ++ ++ ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset); ++ if (ret) ++ return ret; ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c +new file mode 100644 +index 0000000..ea2ed5a +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_object.c +@@ -0,0 +1,1173 @@ ++/* ++ * Copyright (C) 2006 Ben Skeggs. ++ * ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++/* ++ * Authors: ++ * Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++/* NVidia uses context objects to drive drawing operations. ++ ++ Context objects can be selected into 8 subchannels in the FIFO, ++ and then used via DMA command buffers. ++ ++ A context object is referenced by a user defined handle (CARD32). The HW ++ looks up graphics objects in a hash table in the instance RAM. ++ ++ An entry in the hash table consists of 2 CARD32. The first CARD32 contains ++ the handle, the second one a bitfield, that contains the address of the ++ object in instance RAM. 
++ ++ The format of the second CARD32 seems to be: ++ ++ NV4 to NV30: ++ ++ 15: 0 instance_addr >> 4 ++ 17:16 engine (here uses 1 = graphics) ++ 28:24 channel id (here uses 0) ++ 31 valid (use 1) ++ ++ NV40: ++ ++ 15: 0 instance_addr >> 4 (maybe 19-0) ++ 21:20 engine (here uses 1 = graphics) ++ I'm unsure about the other bits, but using 0 seems to work. ++ ++ The key into the hash table depends on the object handle and channel id and ++ is given as: ++*/ ++static uint32_t ++nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ uint32_t hash = 0; ++ int i; ++ ++ DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle); ++ ++ for (i=32;i>0;i-=dev_priv->ramht_bits) { ++ hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); ++ handle >>= dev_priv->ramht_bits; ++ } ++ if (dev_priv->card_type < NV_50) ++ hash ^= channel << (dev_priv->ramht_bits - 4); ++ hash <<= 3; ++ ++ DRM_DEBUG("hash=0x%08x\n", hash); ++ return hash; ++} ++ ++static int ++nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, ++ uint32_t offset) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4); ++ ++ if (dev_priv->card_type < NV_40) ++ return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); ++ return (ctx != 0); ++} ++ ++static int ++nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[ref->channel]; ++ struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; ++ struct nouveau_gpuobj *gpuobj = ref->gpuobj; ++ uint32_t ctx, co, ho; ++ ++ if (!ramht) { ++ DRM_ERROR("No hash table!\n"); ++ return -EINVAL; ++ } ++ ++ if (dev_priv->card_type < NV_40) { ++ ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | ++ (ref->channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | ++ (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); ++ } else ++ if (dev_priv->card_type < NV_50) { ++ ctx = (ref->instance >> 4) | ++ (ref->channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | ++ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); ++ } else { ++ ctx = (ref->instance >> 4) | ++ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); ++ } ++ ++ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle); ++ do { ++ if (!nouveau_ramht_entry_valid(dev, ramht, co)) { ++ DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", ++ ref->channel, co, ref->handle, ctx); ++ INSTANCE_WR(ramht, (co + 0)/4, ref->handle); ++ INSTANCE_WR(ramht, (co + 4)/4, ctx); ++ ++ list_add_tail(&ref->list, &chan->ramht_refs); ++ return 0; ++ } ++ DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", ++ ref->channel, co, INSTANCE_RD(ramht, co/4)); ++ ++ co += 8; ++ if (co >= dev_priv->ramht_size) { ++ DRM_INFO("no space left after collision\n"); ++ co = 0; ++ /* exit as it seems to cause crash with nouveau_demo and ++ * 0xdead0001 object */ ++ break; ++ } ++ } while (co != ho); ++ ++ DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel); ++ return -ENOMEM; ++} ++ ++static void ++nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[ref->channel]; ++ struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; ++ uint32_t co, ho; ++ ++ if (!ramht) { ++ DRM_ERROR("No hash table!\n"); ++ return; ++ } ++ ++ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle); ++ do { ++ if (nouveau_ramht_entry_valid(dev, ramht, co) && ++ (ref->handle == INSTANCE_RD(ramht, (co/4)))) { ++ DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", ++ ref->channel, co, ref->handle, ++ INSTANCE_RD(ramht, (co + 4))); ++ INSTANCE_WR(ramht, (co + 0)/4, 0x00000000); ++ INSTANCE_WR(ramht, (co + 4)/4, 0x00000000); ++ ++ list_del(&ref->list); ++ return; ++ } ++ ++ co += 8; ++ if (co >= dev_priv->ramht_size) ++ co = 0; ++ } while (co != ho); ++ ++ DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n", ++ ref->channel, ref->handle); ++} ++ ++int ++nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, ++ int size, int align, uint32_t flags, ++ struct nouveau_gpuobj **gpuobj_ret) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_gpuobj *gpuobj; ++ struct mem_block *pramin = NULL; ++ int ret; ++ ++ DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n", ++ chan ? chan->id : -1, size, align, flags); ++ ++ if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) ++ return -EINVAL; ++ ++ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); ++ if (!gpuobj) ++ return -ENOMEM; ++ DRM_DEBUG("gpuobj %p\n", gpuobj); ++ gpuobj->flags = flags; ++ gpuobj->im_channel = chan ? chan->id : -1; ++ ++ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); ++ ++ /* Choose between global instmem heap, and per-channel private ++ * instmem heap. 
On ramin_heap) { ++ DRM_DEBUG("private heap\n"); ++ pramin = chan->ramin_heap; ++ } else ++ if (dev_priv->card_type < NV_50) { ++ DRM_DEBUG("global heap fallback\n"); ++ pramin = dev_priv->ramin_heap; ++ } ++ } else { ++ DRM_DEBUG("global heap\n"); ++ pramin = dev_priv->ramin_heap; ++ } ++ ++ if (!pramin) { ++ DRM_ERROR("No PRAMIN heap!\n"); ++ return -EINVAL; ++ } ++ ++ if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return ret; ++ } ++ ++ /* Allocate a chunk of the PRAMIN aperture */ ++ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, ++ drm_order(align), ++ (struct drm_file *)-2, 0); ++ if (!gpuobj->im_pramin) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return -ENOMEM; ++ } ++ gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE; ++ ++ if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return ret; ++ } ++ ++ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { ++ int i; ++ ++ for (i = 0; i < gpuobj->im_pramin->size; i += 4) ++ INSTANCE_WR(gpuobj, i/4, 0); ++ } ++ ++ *gpuobj_ret = gpuobj; ++ return 0; ++} ++ ++int ++nouveau_gpuobj_early_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ INIT_LIST_HEAD(&dev_priv->gpuobj_list); ++ ++ return 0; ++} ++ ++int ++nouveau_gpuobj_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ if (dev_priv->card_type < NV_50) { ++ if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ++ ~0, dev_priv->ramht_size, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ALLOW_NO_REFS, ++ &dev_priv->ramht, NULL))) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nouveau_gpuobj_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ nouveau_gpuobj_del(dev, &dev_priv->ramht); ++} ++ ++void 
++nouveau_gpuobj_late_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ struct list_head *entry, *tmp; ++ ++ DRM_DEBUG("\n"); ++ ++ list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { ++ gpuobj = list_entry(entry, struct nouveau_gpuobj, list); ++ ++ DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n", ++ gpuobj, gpuobj->refcount); ++ gpuobj->refcount = 0; ++ nouveau_gpuobj_del(dev, &gpuobj); ++ } ++} ++ ++int ++nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_gpuobj *gpuobj; ++ ++ DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); ++ ++ if (!dev_priv || !pgpuobj || !(*pgpuobj)) ++ return -EINVAL; ++ gpuobj = *pgpuobj; ++ ++ if (gpuobj->refcount != 0) { ++ DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount); ++ return -EINVAL; ++ } ++ ++ if (gpuobj->dtor) ++ gpuobj->dtor(dev, gpuobj); ++ ++ if (gpuobj->im_backing) { ++ if (gpuobj->flags & NVOBJ_FLAG_FAKE) ++ drm_free(gpuobj->im_backing, ++ sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER); ++ else ++ engine->instmem.clear(dev, gpuobj); ++ } ++ ++ if (gpuobj->im_pramin) { ++ if (gpuobj->flags & NVOBJ_FLAG_FAKE) ++ drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin), ++ DRM_MEM_DRIVER); ++ else ++ nouveau_mem_free_block(gpuobj->im_pramin); ++ } ++ ++ list_del(&gpuobj->list); ++ ++ *pgpuobj = NULL; ++ drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER); ++ return 0; ++} ++ ++static int ++nouveau_gpuobj_instance_get(struct drm_device *dev, ++ struct nouveau_channel *chan, ++ struct nouveau_gpuobj *gpuobj, uint32_t *inst) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *cpramin; ++ ++ /* card_type < NV_50) { ++ *inst = gpuobj->im_pramin->start; ++ return 0; ++ } ++ ++ if (chan && gpuobj->im_channel != chan->id) { ++ 
DRM_ERROR("Channel mismatch: obj %d, ref %d\n", ++ gpuobj->im_channel, chan->id); ++ return -EINVAL; ++ } ++ ++ /* NV50 channel-local instance */ ++ if (chan > 0) { ++ cpramin = chan->ramin->gpuobj; ++ *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; ++ return 0; ++ } ++ ++ /* NV50 global (VRAM) instance */ ++ if (gpuobj->im_channel < 0) { ++ /* ...from global heap */ ++ if (!gpuobj->im_backing) { ++ DRM_ERROR("AII, no VRAM backing gpuobj\n"); ++ return -EINVAL; ++ } ++ *inst = gpuobj->im_backing->start; ++ return 0; ++ } else { ++ /* ...from local heap */ ++ cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj; ++ *inst = cpramin->im_backing->start + ++ (gpuobj->im_pramin->start - cpramin->im_pramin->start); ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++int ++nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, ++ uint32_t handle, struct nouveau_gpuobj *gpuobj, ++ struct nouveau_gpuobj_ref **ref_ret) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj_ref *ref; ++ uint32_t instance; ++ int ret; ++ ++ DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n", ++ chan ? chan->id : -1, handle, gpuobj); ++ ++ if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) ++ return -EINVAL; ++ ++ if (!chan && !ref_ret) ++ return -EINVAL; ++ ++ ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); ++ if (ret) ++ return ret; ++ ++ ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER); ++ if (!ref) ++ return -ENOMEM; ++ ref->gpuobj = gpuobj; ++ ref->channel = chan ? 
chan->id : -1; ++ ref->instance = instance; ++ ++ if (!ref_ret) { ++ ref->handle = handle; ++ ++ ret = nouveau_ramht_insert(dev, ref); ++ if (ret) { ++ drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER); ++ return ret; ++ } ++ } else { ++ ref->handle = ~0; ++ *ref_ret = ref; ++ } ++ ++ ref->gpuobj->refcount++; ++ return 0; ++} ++ ++int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) ++{ ++ struct nouveau_gpuobj_ref *ref; ++ ++ DRM_DEBUG("ref %p\n", pref ? *pref : NULL); ++ ++ if (!dev || !pref || *pref == NULL) ++ return -EINVAL; ++ ref = *pref; ++ ++ if (ref->handle != ~0) ++ nouveau_ramht_remove(dev, ref); ++ ++ if (ref->gpuobj) { ++ ref->gpuobj->refcount--; ++ ++ if (ref->gpuobj->refcount == 0) { ++ if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) ++ nouveau_gpuobj_del(dev, &ref->gpuobj); ++ } ++ } ++ ++ *pref = NULL; ++ drm_free(ref, sizeof(ref), DRM_MEM_DRIVER); ++ return 0; ++} ++ ++int ++nouveau_gpuobj_new_ref(struct drm_device *dev, ++ struct nouveau_channel *oc, struct nouveau_channel *rc, ++ uint32_t handle, int size, int align, uint32_t flags, ++ struct nouveau_gpuobj_ref **ref) ++{ ++ struct nouveau_gpuobj *gpuobj = NULL; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj))) ++ return ret; ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, ++ struct nouveau_gpuobj_ref **ref_ret) ++{ ++ struct nouveau_gpuobj_ref *ref; ++ struct list_head *entry, *tmp; ++ ++ list_for_each_safe(entry, tmp, &chan->ramht_refs) { ++ ref = list_entry(entry, struct nouveau_gpuobj_ref, list); ++ ++ if (ref->handle == handle) { ++ if (ref_ret) ++ *ref_ret = ref; ++ return 0; ++ } ++ } ++ ++ return -EINVAL; ++} ++ ++int ++nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, ++ uint32_t b_offset, uint32_t size, ++ uint32_t 
flags, struct nouveau_gpuobj **pgpuobj, ++ struct nouveau_gpuobj_ref **pref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ int i; ++ ++ DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", ++ p_offset, b_offset, size, flags); ++ ++ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); ++ if (!gpuobj) ++ return -ENOMEM; ++ DRM_DEBUG("gpuobj %p\n", gpuobj); ++ gpuobj->im_channel = -1; ++ gpuobj->flags = flags | NVOBJ_FLAG_FAKE; ++ ++ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); ++ ++ if (p_offset != ~0) { ++ gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), ++ DRM_MEM_DRIVER); ++ if (!gpuobj->im_pramin) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return -ENOMEM; ++ } ++ gpuobj->im_pramin->start = p_offset; ++ gpuobj->im_pramin->size = size; ++ } ++ ++ if (b_offset != ~0) { ++ gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block), ++ DRM_MEM_DRIVER); ++ if (!gpuobj->im_backing) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return -ENOMEM; ++ } ++ gpuobj->im_backing->start = b_offset; ++ gpuobj->im_backing->size = size; ++ } ++ ++ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { ++ for (i = 0; i < gpuobj->im_pramin->size; i += 4) ++ INSTANCE_WR(gpuobj, i/4, 0); ++ } ++ ++ if (pref) { ++ if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return i; ++ } ++ } ++ ++ if (pgpuobj) ++ *pgpuobj = gpuobj; ++ return 0; ++} ++ ++ ++static int ++nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /*XXX: dodgy hack for now */ ++ if (dev_priv->card_type >= NV_50) ++ return 24; ++ if (dev_priv->card_type >= NV_40) ++ return 32; ++ return 16; ++} ++ ++/* ++ DMA objects are used to reference a piece of memory in the ++ framebuffer, PCI or AGP address space. 
Each object is 16 bytes big ++ and looks as follows: ++ ++ entry[0] ++ 11:0 class (seems like I can always use 0 here) ++ 12 page table present? ++ 13 page entry linear? ++ 15:14 access: 0 rw, 1 ro, 2 wo ++ 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP ++ 31:20 dma adjust (bits 0-11 of the address) ++ entry[1] ++ dma limit (size of transfer) ++ entry[X] ++ 1 0 readonly, 1 readwrite ++ 31:12 dma frame address of the page (bits 12-31 of the address) ++ entry[N] ++ page table terminator, same value as the first pte, as does nvidia ++ rivatv uses 0xffffffff ++ ++ Non linear page tables need a list of frame addresses afterwards, ++ the rivatv project has some info on this. ++ ++ The method below creates a DMA object in instance RAM and returns a handle ++ to it that can be used to set up context objects. ++*/ ++int ++nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, ++ uint64_t offset, uint64_t size, int access, ++ int target, struct nouveau_gpuobj **gpuobj) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ uint32_t is_scatter_gather = 0; ++ ++ /* Total number of pages covered by the request. ++ */ ++ const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE; ++ ++ ++ DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", ++ chan->id, class, offset, size); ++ DRM_DEBUG("access=%d target=%d\n", access, target); ++ ++ switch (target) { ++ case NV_DMA_TARGET_AGP: ++ offset += dev_priv->gart_info.aper_base; ++ break; ++ case NV_DMA_TARGET_PCI_NONLINEAR: ++ /*assume the "offset" is a virtual memory address*/ ++ is_scatter_gather = 1; ++ /*put back the right value*/ ++ target = NV_DMA_TARGET_PCI; ++ break; ++ default: ++ break; ++ } ++ ++ ret = nouveau_gpuobj_new(dev, chan, ++ is_scatter_gather ? 
((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class), ++ 16, ++ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, ++ gpuobj); ++ if (ret) { ++ DRM_ERROR("Error creating gpuobj: %d\n", ret); ++ return ret; ++ } ++ ++ if (dev_priv->card_type < NV_50) { ++ uint32_t frame, adjust, pte_flags = 0; ++ adjust = offset & 0x00000fff; ++ if (access != NV_DMA_ACCESS_RO) ++ pte_flags |= (1<<1); ++ ++ if ( ! is_scatter_gather ) ++ { ++ frame = offset & ~0x00000fff; ++ ++ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) | ++ (adjust << 20) | ++ (access << 14) | ++ (target << 16) | ++ class)); ++ INSTANCE_WR(*gpuobj, 1, size - 1); ++ INSTANCE_WR(*gpuobj, 2, frame | pte_flags); ++ INSTANCE_WR(*gpuobj, 3, frame | pte_flags); ++ } ++ else ++ { ++ /* Intial page entry in the scatter-gather area that ++ * corresponds to the base offset ++ */ ++ unsigned int idx = offset / PAGE_SIZE; ++ ++ uint32_t instance_offset; ++ unsigned int i; ++ ++ if ((idx + page_count) > dev->sg->pages) { ++ DRM_ERROR("Requested page range exceedes " ++ "allocated scatter-gather range!"); ++ return -E2BIG; ++ } ++ ++ DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size); ++ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) | ++ (adjust << 20) | ++ (access << 14) | ++ (target << 16) | ++ class)); ++ INSTANCE_WR(*gpuobj, 1, (uint32_t) size-1); ++ ++ ++ /*write starting at the third dword*/ ++ instance_offset = 2; ++ ++ /*for each PAGE, get its bus address, fill in the page table entry, and advance*/ ++ for (i = 0; i < page_count; i++) { ++ if (dev->sg->busaddr[idx] == 0) { ++ dev->sg->busaddr[idx] = ++ pci_map_page(dev->pdev, ++ dev->sg->pagelist[idx], ++ 0, ++ PAGE_SIZE, ++ DMA_BIDIRECTIONAL); ++ ++ if (dma_mapping_error(&dev->primary->kdev, dev->sg->busaddr[idx])) { ++ return -ENOMEM; ++ } ++ } ++ ++ frame = (uint32_t) dev->sg->busaddr[idx]; ++ INSTANCE_WR(*gpuobj, instance_offset, ++ frame | pte_flags); ++ ++ idx++; ++ instance_offset ++; ++ } ++ } ++ 
} else { ++ uint32_t flags0, flags5; ++ ++ if (target == NV_DMA_TARGET_VIDMEM) { ++ flags0 = 0x00190000; ++ flags5 = 0x00010000; ++ } else { ++ flags0 = 0x7fc00000; ++ flags5 = 0x00080000; ++ } ++ ++ INSTANCE_WR(*gpuobj, 0, flags0 | class); ++ INSTANCE_WR(*gpuobj, 1, offset + size - 1); ++ INSTANCE_WR(*gpuobj, 2, offset); ++ INSTANCE_WR(*gpuobj, 5, flags5); ++ } ++ ++ (*gpuobj)->engine = NVOBJ_ENGINE_SW; ++ (*gpuobj)->class = class; ++ return 0; ++} ++ ++int ++nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, ++ uint64_t offset, uint64_t size, int access, ++ struct nouveau_gpuobj **gpuobj, ++ uint32_t *o_ret) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || ++ (dev_priv->card_type >= NV_50 && ++ dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ offset, size, access, ++ NV_DMA_TARGET_AGP, gpuobj); ++ if (o_ret) ++ *o_ret = 0; ++ } else ++ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { ++ *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ if (offset & ~0xffffffffULL) { ++ DRM_ERROR("obj offset exceeds 32-bits\n"); ++ return -EINVAL; ++ } ++ if (o_ret) ++ *o_ret = (uint32_t)offset; ++ ret = (*gpuobj != NULL) ? 0 : -EINVAL; ++ } else { ++ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); ++ return -EINVAL; ++ } ++ ++ return ret; ++} ++ ++/* Context objects in the instance RAM have the following structure. ++ * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. 
++ ++ NV4 - NV30: ++ ++ entry[0] ++ 11:0 class ++ 12 chroma key enable ++ 13 user clip enable ++ 14 swizzle enable ++ 17:15 patch config: ++ scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre ++ 18 synchronize enable ++ 19 endian: 1 big, 0 little ++ 21:20 dither mode ++ 23 single step enable ++ 24 patch status: 0 invalid, 1 valid ++ 25 context_surface 0: 1 valid ++ 26 context surface 1: 1 valid ++ 27 context pattern: 1 valid ++ 28 context rop: 1 valid ++ 29,30 context beta, beta4 ++ entry[1] ++ 7:0 mono format ++ 15:8 color format ++ 31:16 notify instance address ++ entry[2] ++ 15:0 dma 0 instance address ++ 31:16 dma 1 instance address ++ entry[3] ++ dma method traps ++ ++ NV40: ++ No idea what the exact format is. Here's what can be deducted: ++ ++ entry[0]: ++ 11:0 class (maybe uses more bits here?) ++ 17 user clip enable ++ 21:19 patch config ++ 25 patch status valid ? ++ entry[1]: ++ 15:0 DMA notifier (maybe 20:0) ++ entry[2]: ++ 15:0 DMA 0 instance (maybe 20:0) ++ 24 big endian ++ entry[3]: ++ 15:0 DMA 1 instance (maybe 20:0) ++ entry[4]: ++ entry[5]: ++ set to 0? 
++*/ ++int ++nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, ++ struct nouveau_gpuobj **gpuobj) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class); ++ ++ ret = nouveau_gpuobj_new(dev, chan, ++ nouveau_gpuobj_class_instmem_size(dev, class), ++ 16, ++ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, ++ gpuobj); ++ if (ret) { ++ DRM_ERROR("Error creating gpuobj: %d\n", ret); ++ return ret; ++ } ++ ++ if (dev_priv->card_type >= NV_50) { ++ INSTANCE_WR(*gpuobj, 0, class); ++ INSTANCE_WR(*gpuobj, 5, 0x00010000); ++ } else { ++ switch (class) { ++ case NV_CLASS_NULL: ++ INSTANCE_WR(*gpuobj, 0, 0x00001030); ++ INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF); ++ break; ++ default: ++ if (dev_priv->card_type >= NV_40) { ++ INSTANCE_WR(*gpuobj, 0, class); ++#ifdef __BIG_ENDIAN ++ INSTANCE_WR(*gpuobj, 2, 0x01000000); ++#endif ++ } else { ++#ifdef __BIG_ENDIAN ++ INSTANCE_WR(*gpuobj, 0, class | 0x00080000); ++#else ++ INSTANCE_WR(*gpuobj, 0, class); ++#endif ++ } ++ } ++ } ++ ++ (*gpuobj)->engine = NVOBJ_ENGINE_GR; ++ (*gpuobj)->class = class; ++ return 0; ++} ++ ++static int ++nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *pramin = NULL; ++ int size, base, ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ /* Base amount for object storage (4KiB enough?) 
*/ ++ size = 0x1000; ++ base = 0; ++ ++ /* PGRAPH context */ ++ ++ if (dev_priv->card_type == NV_50) { ++ /* Various fixed table thingos */ ++ size += 0x1400; /* mostly unknown stuff */ ++ size += 0x4000; /* vm pd */ ++ base = 0x6000; ++ /* RAMHT, not sure about setting size yet, 32KiB to be safe */ ++ size += 0x8000; ++ /* RAMFC */ ++ size += 0x1000; ++ /* PGRAPH context */ ++ size += 0x60000; ++ } ++ ++ DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", ++ chan->id, size, base); ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, ++ &chan->ramin); ++ if (ret) { ++ DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret); ++ return ret; ++ } ++ pramin = chan->ramin->gpuobj; ++ ++ ret = nouveau_mem_init_heap(&chan->ramin_heap, ++ pramin->im_pramin->start + base, size); ++ if (ret) { ++ DRM_ERROR("Error creating PRAMIN heap: %d\n", ret); ++ nouveau_gpuobj_ref_del(dev, &chan->ramin); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++nouveau_gpuobj_channel_init(struct nouveau_channel *chan, ++ uint32_t vram_h, uint32_t tt_h) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *vram = NULL, *tt = NULL; ++ int ret, i; ++ ++ INIT_LIST_HEAD(&chan->ramht_refs); ++ ++ DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); ++ ++ /* Reserve a block of PRAMIN for the channel ++ *XXX: maybe on card_type == NV_50) { ++ ret = nouveau_gpuobj_channel_init_pramin(chan); ++ if (ret) ++ return ret; ++ } ++ ++ /* NV50 VM ++ * - Allocate per-channel page-directory ++ * - Point offset 0-512MiB at shared PCIEGART table ++ * - Point offset 512-1024MiB at shared VRAM table ++ */ ++ if (dev_priv->card_type >= NV_50) { ++ uint32_t vm_offset; ++ ++ vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 
0x1400 : 0x200; ++ vm_offset += chan->ramin->gpuobj->im_pramin->start; ++ if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, ++ 0, &chan->vm_pd, NULL))) ++ return ret; ++ for (i=0; i<0x4000; i+=8) { ++ INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000); ++ INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe); ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0, ++ dev_priv->gart_info.sg_ctxdma, ++ &chan->vm_gart_pt))) ++ return ret; ++ INSTANCE_WR(chan->vm_pd, (0+0)/4, ++ chan->vm_gart_pt->instance | 0x03); ++ INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000); ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0, ++ dev_priv->vm_vram_pt, ++ &chan->vm_vram_pt))) ++ return ret; ++ INSTANCE_WR(chan->vm_pd, (8+0)/4, ++ chan->vm_vram_pt->instance | 0x61); ++ INSTANCE_WR(chan->vm_pd, (8+4)/4, 0x00000000); ++ } ++ ++ /* RAMHT */ ++ if (dev_priv->card_type < NV_50) { ++ ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, ++ &chan->ramht); ++ if (ret) ++ return ret; ++ } else { ++ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, ++ 0x8000, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &chan->ramht); ++ if (ret) ++ return ret; ++ } ++ ++ /* VRAM ctxdma */ ++ if (dev_priv->card_type >= NV_50) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ 0, 0x100000000ULL, ++ NV_DMA_ACCESS_RW, ++ NV_DMA_TARGET_AGP, &vram); ++ if (ret) { ++ DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret); ++ return ret; ++ } ++ } else ++ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ 0, dev_priv->fb_available_size, ++ NV_DMA_ACCESS_RW, ++ NV_DMA_TARGET_VIDMEM, &vram))) { ++ DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) { ++ DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ /* TT memory ctxdma */ ++ if (dev_priv->card_type >= NV_50) { ++ tt = vram; ++ } else ++ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { ++ ret = 
nouveau_gpuobj_gart_dma_new(chan, 0, ++ dev_priv->gart_info.aper_size, ++ NV_DMA_ACCESS_RW, &tt, NULL); ++ } else ++ if (dev_priv->pci_heap) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ 0, dev->sg->pages * PAGE_SIZE, ++ NV_DMA_ACCESS_RW, ++ NV_DMA_TARGET_PCI_NONLINEAR, &tt); ++ } else { ++ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); ++ ret = -EINVAL; ++ } ++ ++ if (ret) { ++ DRM_ERROR("Error creating TT ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); ++ if (ret) { ++ DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct list_head *entry, *tmp; ++ struct nouveau_gpuobj_ref *ref; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ list_for_each_safe(entry, tmp, &chan->ramht_refs) { ++ ref = list_entry(entry, struct nouveau_gpuobj_ref, list); ++ ++ nouveau_gpuobj_ref_del(dev, &ref); ++ } ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramht); ++ ++ nouveau_gpuobj_del(dev, &chan->vm_pd); ++ nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); ++ nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt); ++ ++ if (chan->ramin_heap) ++ nouveau_mem_takedown(&chan->ramin_heap); ++ if (chan->ramin) ++ nouveau_gpuobj_ref_del(dev, &chan->ramin); ++ ++} ++ ++int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct nouveau_channel *chan; ++ struct drm_nouveau_grobj_alloc *init = data; ++ struct nouveau_gpuobj *gr = NULL; ++ int ret; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); ++ ++ //FIXME: check args, only allow trusted objects to be created ++ ++ if (init->handle == ~0) ++ return -EINVAL; ++ ++ if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) ++ return -EEXIST; ++ ++ ret = nouveau_gpuobj_gr_new(chan, init->class, 
&gr); ++ if (ret) { ++ DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n", ++ ret, init->channel, init->handle); ++ return ret; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) { ++ DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)", ++ ret, init->channel, init->handle); ++ nouveau_gpuobj_del(dev, &gr); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_gpuobj_free *objfree = data; ++ struct nouveau_gpuobj_ref *ref; ++ struct nouveau_channel *chan; ++ int ret; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); ++ ++ if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref))) ++ return ret; ++ nouveau_gpuobj_ref_del(dev, &ref); ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h +new file mode 100644 +index 0000000..1ae0177 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_reg.h +@@ -0,0 +1,593 @@ ++ ++ ++#define NV03_BOOT_0 0x00100000 ++# define NV03_BOOT_0_RAM_AMOUNT 0x00000003 ++# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000 ++# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001 ++# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002 ++# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003 ++# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000 ++# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001 ++# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002 ++# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003 ++ ++#define NV04_FIFO_DATA 0x0010020c ++# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 ++# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 ++ ++#define NV_RAMIN 0x00700000 ++ ++#define NV_RAMHT_HANDLE_OFFSET 0 ++#define NV_RAMHT_CONTEXT_OFFSET 4 ++# define NV_RAMHT_CONTEXT_VALID (1<<31) ++# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24 ++# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16 ++# define 
NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0 ++# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1 ++# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0 ++# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23 ++# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 ++# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 ++ ++/* DMA object defines */ ++#define NV_DMA_ACCESS_RW 0 ++#define NV_DMA_ACCESS_RO 1 ++#define NV_DMA_ACCESS_WO 2 ++#define NV_DMA_TARGET_VIDMEM 0 ++#define NV_DMA_TARGET_PCI 2 ++#define NV_DMA_TARGET_AGP 3 ++/*The following is not a real value used by nvidia cards, it's changed by nouveau_object_dma_create*/ ++#define NV_DMA_TARGET_PCI_NONLINEAR 8 ++ ++/* Some object classes we care about in the drm */ ++#define NV_CLASS_DMA_FROM_MEMORY 0x00000002 ++#define NV_CLASS_DMA_TO_MEMORY 0x00000003 ++#define NV_CLASS_NULL 0x00000030 ++#define NV_CLASS_DMA_IN_MEMORY 0x0000003D ++ ++#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE)) ++#define NV03_USER__SIZE 16 ++#define NV10_USER__SIZE 32 ++#define NV03_USER_SIZE 0x00010000 ++#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE)) ++#define NV03_USER_DMA_PUT__SIZE 16 ++#define NV10_USER_DMA_PUT__SIZE 32 ++#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE)) ++#define NV03_USER_DMA_GET__SIZE 16 ++#define NV10_USER_DMA_GET__SIZE 32 ++#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE)) ++#define NV03_USER_REF_CNT__SIZE 16 ++#define NV10_USER_REF_CNT__SIZE 32 ++ ++#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE)) ++#define NV40_USER_SIZE 0x00001000 ++#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE)) ++#define NV40_USER_DMA_PUT__SIZE 32 ++#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE)) ++#define NV40_USER_DMA_GET__SIZE 32 ++#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE)) ++#define NV40_USER_REF_CNT__SIZE 32 ++ ++#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE)) ++#define NV50_USER_SIZE 0x00002000 ++#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE)) ++#define NV50_USER_DMA_PUT__SIZE 128 
++#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE)) ++#define NV50_USER_DMA_GET__SIZE 128 ++/*XXX: I don't think this actually exists.. */ ++#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE)) ++#define NV50_USER_REF_CNT__SIZE 128 ++ ++#define NV03_FIFO_SIZE 0x8000UL ++ ++#define NV03_PMC_BOOT_0 0x00000000 ++#define NV03_PMC_BOOT_1 0x00000004 ++#define NV03_PMC_INTR_0 0x00000100 ++# define NV_PMC_INTR_0_PFIFO_PENDING (1<< 8) ++# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12) ++# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21) ++# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24) ++# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25) ++# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26) ++# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24) ++#define NV03_PMC_INTR_EN_0 0x00000140 ++# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<< 0) ++#define NV03_PMC_ENABLE 0x00000200 ++# define NV_PMC_ENABLE_PFIFO (1<< 8) ++# define NV_PMC_ENABLE_PGRAPH (1<<12) ++/* Disabling the below bit breaks newer (G7X only?) mobile chipsets, ++ * the card will hang early on in the X init process. ++ */ ++# define NV_PMC_ENABLE_UNK13 (1<<13) ++#define NV40_PMC_1700 0x00001700 ++#define NV40_PMC_1704 0x00001704 ++#define NV40_PMC_1708 0x00001708 ++#define NV40_PMC_170C 0x0000170C ++ ++/* probably PMC ? 
*/ ++#define NV50_PUNK_BAR0_PRAMIN 0x00001700 ++#define NV50_PUNK_BAR_CFG_BASE 0x00001704 ++#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30) ++#define NV50_PUNK_BAR1_CTXDMA 0x00001708 ++#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31) ++#define NV50_PUNK_BAR3_CTXDMA 0x0000170C ++#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31) ++#define NV50_PUNK_UNK1710 0x00001710 ++ ++#define NV04_PBUS_PCI_NV_1 0x00001804 ++#define NV04_PBUS_PCI_NV_19 0x0000184C ++ ++#define NV04_PTIMER_INTR_0 0x00009100 ++#define NV04_PTIMER_INTR_EN_0 0x00009140 ++#define NV04_PTIMER_NUMERATOR 0x00009200 ++#define NV04_PTIMER_DENOMINATOR 0x00009210 ++#define NV04_PTIMER_TIME_0 0x00009400 ++#define NV04_PTIMER_TIME_1 0x00009410 ++#define NV04_PTIMER_ALARM_0 0x00009420 ++ ++#define NV50_I2C_CONTROLLER 0x0000E054 ++ ++#define NV04_PFB_CFG0 0x00100200 ++#define NV04_PFB_CFG1 0x00100204 ++#define NV40_PFB_020C 0x0010020C ++#define NV10_PFB_TILE(i) (0x00100240 + (i*16)) ++#define NV10_PFB_TILE__SIZE 8 ++#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16)) ++#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16)) ++#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16)) ++#define NV10_PFB_CLOSE_PAGE2 0x0010033C ++#define NV40_PFB_TILE(i) (0x00100600 + (i*16)) ++#define NV40_PFB_TILE__SIZE_0 12 ++#define NV40_PFB_TILE__SIZE_1 15 ++#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16)) ++#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16)) ++#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16)) ++#define NV40_PFB_UNK_800 0x00100800 ++ ++#define NV04_PGRAPH_DEBUG_0 0x00400080 ++#define NV04_PGRAPH_DEBUG_1 0x00400084 ++#define NV04_PGRAPH_DEBUG_2 0x00400088 ++#define NV04_PGRAPH_DEBUG_3 0x0040008c ++#define NV10_PGRAPH_DEBUG_4 0x00400090 ++#define NV03_PGRAPH_INTR 0x00400100 ++#define NV03_PGRAPH_NSTATUS 0x00400104 ++# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11) ++# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12) ++# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13) ++# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14) ++# define 
NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23) ++# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24) ++# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) ++# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26) ++#define NV03_PGRAPH_NSOURCE 0x00400108 ++# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<< 0) ++# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<< 1) ++# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<< 2) ++# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<< 3) ++# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<< 4) ++# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<< 5) ++# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<< 6) ++# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<< 7) ++# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<< 8) ++# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<< 9) ++# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10) ++# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11) ++# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12) ++# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13) ++# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14) ++# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15) ++# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16) ++# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17) ++# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18) ++#define NV03_PGRAPH_INTR_EN 0x00400140 ++#define NV40_PGRAPH_INTR_EN 0x0040013C ++# define NV_PGRAPH_INTR_NOTIFY (1<< 0) ++# define NV_PGRAPH_INTR_MISSING_HW (1<< 4) ++# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12) ++# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16) ++# define NV_PGRAPH_INTR_ERROR (1<<20) ++#define NV10_PGRAPH_CTX_CONTROL 0x00400144 ++#define NV10_PGRAPH_CTX_USER 0x00400148 ++#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C ++#define NV10_PGRAPH_CTX_SWITCH2 0x00400150 ++#define NV10_PGRAPH_CTX_SWITCH3 0x00400154 ++#define NV10_PGRAPH_CTX_SWITCH4 0x00400158 ++#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C ++#define NV04_PGRAPH_CTX_SWITCH1 0x00400160 ++#define NV10_PGRAPH_CTX_CACHE1 0x00400160 ++#define NV04_PGRAPH_CTX_SWITCH2 
0x00400164 ++#define NV04_PGRAPH_CTX_SWITCH3 0x00400168 ++#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C ++#define NV04_PGRAPH_CTX_CONTROL 0x00400170 ++#define NV04_PGRAPH_CTX_USER 0x00400174 ++#define NV04_PGRAPH_CTX_CACHE1 0x00400180 ++#define NV10_PGRAPH_CTX_CACHE2 0x00400180 ++#define NV03_PGRAPH_CTX_CONTROL 0x00400190 ++#define NV03_PGRAPH_CTX_USER 0x00400194 ++#define NV04_PGRAPH_CTX_CACHE2 0x004001A0 ++#define NV10_PGRAPH_CTX_CACHE3 0x004001A0 ++#define NV04_PGRAPH_CTX_CACHE3 0x004001C0 ++#define NV10_PGRAPH_CTX_CACHE4 0x004001C0 ++#define NV04_PGRAPH_CTX_CACHE4 0x004001E0 ++#define NV10_PGRAPH_CTX_CACHE5 0x004001E0 ++#define NV40_PGRAPH_CTXCTL_0304 0x00400304 ++#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff ++#define NV40_PGRAPH_CTXCTL_0310 0x00400310 ++#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 ++#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 ++#define NV40_PGRAPH_CTXCTL_030C 0x0040030c ++#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324 ++#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328 ++#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c ++#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000 ++#define NV40_PGRAPH_CTXCTL_CUR_INST_MASK 0x000FFFFF ++#define NV03_PGRAPH_ABS_X_RAM 0x00400400 ++#define NV03_PGRAPH_ABS_Y_RAM 0x00400480 ++#define NV03_PGRAPH_X_MISC 0x00400500 ++#define NV03_PGRAPH_Y_MISC 0x00400504 ++#define NV04_PGRAPH_VALID1 0x00400508 ++#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C ++#define NV04_PGRAPH_MISC24_0 0x00400510 ++#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514 ++#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518 ++#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C ++#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520 ++#define NV03_PGRAPH_CLIPX_0 0x00400524 ++#define NV03_PGRAPH_CLIPX_1 0x00400528 ++#define NV03_PGRAPH_CLIPY_0 
0x0040052C ++#define NV03_PGRAPH_CLIPY_1 0x00400530 ++#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534 ++#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538 ++#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C ++#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540 ++#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544 ++#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548 ++#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560 ++#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564 ++#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568 ++#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C ++#define NV04_PGRAPH_MISC24_1 0x00400570 ++#define NV04_PGRAPH_MISC24_2 0x00400574 ++#define NV04_PGRAPH_VALID2 0x00400578 ++#define NV04_PGRAPH_PASSTHRU_0 0x0040057C ++#define NV04_PGRAPH_PASSTHRU_1 0x00400580 ++#define NV04_PGRAPH_PASSTHRU_2 0x00400584 ++#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588 ++#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C ++#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590 ++#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594 ++#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598 ++#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C ++#define NV04_PGRAPH_FORMAT_0 0x004005A8 ++#define NV04_PGRAPH_FORMAT_1 0x004005AC ++#define NV04_PGRAPH_FILTER_0 0x004005B0 ++#define NV04_PGRAPH_FILTER_1 0x004005B4 ++#define NV03_PGRAPH_MONO_COLOR0 0x00400600 ++#define NV04_PGRAPH_ROP3 0x00400604 ++#define NV04_PGRAPH_BETA_AND 0x00400608 ++#define NV04_PGRAPH_BETA_PREMULT 0x0040060C ++#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610 ++#define NV04_PGRAPH_FORMATS 0x00400618 ++#define NV10_PGRAPH_DEBUG_2 0x00400620 ++#define NV04_PGRAPH_BOFFSET0 0x00400640 ++#define NV04_PGRAPH_BOFFSET1 0x00400644 ++#define NV04_PGRAPH_BOFFSET2 0x00400648 ++#define NV04_PGRAPH_BOFFSET3 0x0040064C ++#define NV04_PGRAPH_BOFFSET4 0x00400650 ++#define NV04_PGRAPH_BOFFSET5 0x00400654 ++#define NV04_PGRAPH_BBASE0 0x00400658 ++#define NV04_PGRAPH_BBASE1 0x0040065C ++#define NV04_PGRAPH_BBASE2 0x00400660 ++#define NV04_PGRAPH_BBASE3 0x00400664 ++#define NV04_PGRAPH_BBASE4 
0x00400668 ++#define NV04_PGRAPH_BBASE5 0x0040066C ++#define NV04_PGRAPH_BPITCH0 0x00400670 ++#define NV04_PGRAPH_BPITCH1 0x00400674 ++#define NV04_PGRAPH_BPITCH2 0x00400678 ++#define NV04_PGRAPH_BPITCH3 0x0040067C ++#define NV04_PGRAPH_BPITCH4 0x00400680 ++#define NV04_PGRAPH_BLIMIT0 0x00400684 ++#define NV04_PGRAPH_BLIMIT1 0x00400688 ++#define NV04_PGRAPH_BLIMIT2 0x0040068C ++#define NV04_PGRAPH_BLIMIT3 0x00400690 ++#define NV04_PGRAPH_BLIMIT4 0x00400694 ++#define NV04_PGRAPH_BLIMIT5 0x00400698 ++#define NV04_PGRAPH_BSWIZZLE2 0x0040069C ++#define NV04_PGRAPH_BSWIZZLE5 0x004006A0 ++#define NV03_PGRAPH_STATUS 0x004006B0 ++#define NV04_PGRAPH_STATUS 0x00400700 ++#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 ++#define NV04_PGRAPH_TRAPPED_DATA 0x00400708 ++#define NV04_PGRAPH_SURFACE 0x0040070C ++#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C ++#define NV04_PGRAPH_STATE 0x00400710 ++#define NV10_PGRAPH_SURFACE 0x00400710 ++#define NV04_PGRAPH_NOTIFY 0x00400714 ++#define NV10_PGRAPH_STATE 0x00400714 ++#define NV10_PGRAPH_NOTIFY 0x00400718 ++ ++#define NV04_PGRAPH_FIFO 0x00400720 ++ ++#define NV04_PGRAPH_BPIXEL 0x00400724 ++#define NV10_PGRAPH_RDI_INDEX 0x00400750 ++#define NV04_PGRAPH_FFINTFC_ST2 0x00400754 ++#define NV10_PGRAPH_RDI_DATA 0x00400754 ++#define NV04_PGRAPH_DMA_PITCH 0x00400760 ++#define NV10_PGRAPH_FFINTFC_ST2 0x00400764 ++#define NV04_PGRAPH_DVD_COLORFMT 0x00400764 ++#define NV04_PGRAPH_SCALED_FORMAT 0x00400768 ++#define NV10_PGRAPH_DMA_PITCH 0x00400770 ++#define NV10_PGRAPH_DVD_COLORFMT 0x00400774 ++#define NV10_PGRAPH_SCALED_FORMAT 0x00400778 ++#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 ++#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784 ++#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788 ++#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001 ++#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002 ++#define NV04_PGRAPH_PATT_COLOR0 0x00400800 ++#define NV04_PGRAPH_PATT_COLOR1 0x00400804 ++#define NV04_PGRAPH_PATTERN 0x00400808 ++#define 
NV04_PGRAPH_PATTERN_SHAPE 0x00400810 ++#define NV04_PGRAPH_CHROMA 0x00400814 ++#define NV04_PGRAPH_CONTROL0 0x00400818 ++#define NV04_PGRAPH_CONTROL1 0x0040081C ++#define NV04_PGRAPH_CONTROL2 0x00400820 ++#define NV04_PGRAPH_BLEND 0x00400824 ++#define NV04_PGRAPH_STORED_FMT 0x00400830 ++#define NV04_PGRAPH_PATT_COLORRAM 0x00400900 ++#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16)) ++#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16)) ++#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16)) ++#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16)) ++#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) ++#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) ++#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) ++#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) ++#define NV04_PGRAPH_U_RAM 0x00400D00 ++#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16)) ++#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16)) ++#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16)) ++#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16)) ++#define NV04_PGRAPH_V_RAM 0x00400D40 ++#define NV04_PGRAPH_W_RAM 0x00400D80 ++#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 ++#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 ++#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 ++#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C ++#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50 ++#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54 ++#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58 ++#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C ++#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60 ++#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64 ++#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68 ++#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C ++#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00 ++#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20 ++#define NV10_PGRAPH_XFMODE0 0x00400F40 ++#define NV10_PGRAPH_XFMODE1 0x00400F44 ++#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48 ++#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C ++#define 
NV10_PGRAPH_PIPE_ADDRESS 0x00400F50 ++#define NV10_PGRAPH_PIPE_DATA 0x00400F54 ++#define NV04_PGRAPH_DMA_START_0 0x00401000 ++#define NV04_PGRAPH_DMA_START_1 0x00401004 ++#define NV04_PGRAPH_DMA_LENGTH 0x00401008 ++#define NV04_PGRAPH_DMA_MISC 0x0040100C ++#define NV04_PGRAPH_DMA_DATA_0 0x00401020 ++#define NV04_PGRAPH_DMA_DATA_1 0x00401024 ++#define NV04_PGRAPH_DMA_RM 0x00401030 ++#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040 ++#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044 ++#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048 ++#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C ++#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050 ++#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054 ++#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058 ++#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C ++#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060 ++#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080 ++#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084 ++#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088 ++#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C ++#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090 ++#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094 ++#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 ++#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C ++#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 ++#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) ++#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) ++#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) ++#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) ++ ++ ++/* It's a guess that this works on NV03. 
Confirmed on NV04, though */ ++#define NV04_PFIFO_DELAY_0 0x00002040 ++#define NV04_PFIFO_DMA_TIMESLICE 0x00002044 ++#define NV04_PFIFO_NEXT_CHANNEL 0x00002050 ++#define NV03_PFIFO_INTR_0 0x00002100 ++#define NV03_PFIFO_INTR_EN_0 0x00002140 ++# define NV_PFIFO_INTR_CACHE_ERROR (1<< 0) ++# define NV_PFIFO_INTR_RUNOUT (1<< 4) ++# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<< 8) ++# define NV_PFIFO_INTR_DMA_PUSHER (1<<12) ++# define NV_PFIFO_INTR_DMA_PT (1<<16) ++# define NV_PFIFO_INTR_SEMAPHORE (1<<20) ++# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24) ++#define NV03_PFIFO_RAMHT 0x00002210 ++#define NV03_PFIFO_RAMFC 0x00002214 ++#define NV03_PFIFO_RAMRO 0x00002218 ++#define NV40_PFIFO_RAMFC 0x00002220 ++#define NV03_PFIFO_CACHES 0x00002500 ++#define NV04_PFIFO_MODE 0x00002504 ++#define NV04_PFIFO_DMA 0x00002508 ++#define NV04_PFIFO_SIZE 0x0000250c ++#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4) ++#define NV50_PFIFO_CTX_TABLE__SIZE 128 ++#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31) ++#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30) ++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF ++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF ++#define NV03_PFIFO_CACHE0_PUSH0 0x00003000 ++#define NV03_PFIFO_CACHE0_PULL0 0x00003040 ++#define NV04_PFIFO_CACHE0_PULL0 0x00003050 ++#define NV04_PFIFO_CACHE0_PULL1 0x00003054 ++#define NV03_PFIFO_CACHE1_PUSH0 0x00003200 ++#define NV03_PFIFO_CACHE1_PUSH1 0x00003204 ++#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8) ++#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16) ++#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f ++#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f ++#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f ++#define NV03_PFIFO_CACHE1_PUT 0x00003210 ++#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220 ++#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008 ++# define 
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000 ++# define 
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000 ++# define NV_PFIFO_CACHE1_ENDIAN 0x80000000 ++# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF ++# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000 ++#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228 ++#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c ++#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230 ++#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240 ++#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244 ++#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248 ++#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C ++#define NV03_PFIFO_CACHE1_PULL0 0x00003240 ++#define 
NV04_PFIFO_CACHE1_PULL0 0x00003250 ++#define NV03_PFIFO_CACHE1_PULL1 0x00003250 ++#define NV04_PFIFO_CACHE1_PULL1 0x00003254 ++#define NV04_PFIFO_CACHE1_HASH 0x00003258 ++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260 ++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264 ++#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268 ++#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C ++#define NV03_PFIFO_CACHE1_GET 0x00003270 ++#define NV04_PFIFO_CACHE1_ENGINE 0x00003280 ++#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 ++#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 ++#define NV40_PFIFO_UNK32E4 0x000032E4 ++#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8)) ++#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8)) ++#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8)) ++#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8)) ++ ++#define NV_CRTC0_INTSTAT 0x00600100 ++#define NV_CRTC0_INTEN 0x00600140 ++#define NV_CRTC1_INTSTAT 0x00602100 ++#define NV_CRTC1_INTEN 0x00602140 ++# define NV_CRTC_INTR_VBLANK (1<<0) ++ ++/* This name is a partial guess. */ ++#define NV50_DISPLAY_SUPERVISOR 0x00610024 ++ ++/* Fifo commands. 
These are not regs, neither masks */ ++#define NV03_FIFO_CMD_JUMP 0x20000000 ++#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc ++#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK)) ++ ++/* RAMFC offsets */ ++#define NV04_RAMFC_DMA_PUT 0x00 ++#define NV04_RAMFC_DMA_GET 0x04 ++#define NV04_RAMFC_DMA_INSTANCE 0x08 ++#define NV04_RAMFC_DMA_STATE 0x0C ++#define NV04_RAMFC_DMA_FETCH 0x10 ++#define NV04_RAMFC_ENGINE 0x14 ++#define NV04_RAMFC_PULL1_ENGINE 0x18 ++ ++#define NV10_RAMFC_DMA_PUT 0x00 ++#define NV10_RAMFC_DMA_GET 0x04 ++#define NV10_RAMFC_REF_CNT 0x08 ++#define NV10_RAMFC_DMA_INSTANCE 0x0C ++#define NV10_RAMFC_DMA_STATE 0x10 ++#define NV10_RAMFC_DMA_FETCH 0x14 ++#define NV10_RAMFC_ENGINE 0x18 ++#define NV10_RAMFC_PULL1_ENGINE 0x1C ++#define NV10_RAMFC_ACQUIRE_VALUE 0x20 ++#define NV10_RAMFC_ACQUIRE_TIMESTAMP 0x24 ++#define NV10_RAMFC_ACQUIRE_TIMEOUT 0x28 ++#define NV10_RAMFC_SEMAPHORE 0x2C ++#define NV10_RAMFC_DMA_SUBROUTINE 0x30 ++ ++#define NV40_RAMFC_DMA_PUT 0x00 ++#define NV40_RAMFC_DMA_GET 0x04 ++#define NV40_RAMFC_REF_CNT 0x08 ++#define NV40_RAMFC_DMA_INSTANCE 0x0C ++#define NV40_RAMFC_DMA_DCOUNT /* ? 
*/ 0x10 ++#define NV40_RAMFC_DMA_STATE 0x14 ++#define NV40_RAMFC_DMA_FETCH 0x18 ++#define NV40_RAMFC_ENGINE 0x1C ++#define NV40_RAMFC_PULL1_ENGINE 0x20 ++#define NV40_RAMFC_ACQUIRE_VALUE 0x24 ++#define NV40_RAMFC_ACQUIRE_TIMESTAMP 0x28 ++#define NV40_RAMFC_ACQUIRE_TIMEOUT 0x2C ++#define NV40_RAMFC_SEMAPHORE 0x30 ++#define NV40_RAMFC_DMA_SUBROUTINE 0x34 ++#define NV40_RAMFC_GRCTX_INSTANCE /* guess */ 0x38 ++#define NV40_RAMFC_DMA_TIMESLICE 0x3C ++#define NV40_RAMFC_UNK_40 0x40 ++#define NV40_RAMFC_UNK_44 0x44 ++#define NV40_RAMFC_UNK_48 0x48 ++#define NV40_RAMFC_UNK_4C 0x4C ++#define NV40_RAMFC_UNK_50 0x50 +diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c +new file mode 100644 +index 0000000..b35bfb7 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c +@@ -0,0 +1,342 @@ ++#include "drmP.h" ++#include "nouveau_drv.h" ++#include ++ ++#define NV_CTXDMA_PAGE_SHIFT 12 ++#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) ++#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) ++ ++struct nouveau_sgdma_be { ++ struct drm_ttm_backend backend; ++ struct drm_device *dev; ++ ++ int pages; ++ int pages_populated; ++ dma_addr_t *pagelist; ++ int is_bound; ++ ++ unsigned int pte_start; ++}; ++ ++static int ++nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be) ++{ ++ return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1); ++} ++ ++static int ++nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, ++ struct page **pages, struct page *dummy_read_page) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ int p, d, o; ++ ++ DRM_DEBUG("num_pages = %ld\n", num_pages); ++ ++ if (nvbe->pagelist) ++ return -EINVAL; ++ nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT; ++ nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t), ++ DRM_MEM_PAGES); ++ ++ nvbe->pages_populated = d = 0; ++ for (p = 0; p < num_pages; p++) { ++ for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) { ++ struct page *page = pages[p]; ++ if (!page) ++ page = dummy_read_page; ++ nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev, ++ page, o, ++ NV_CTXDMA_PAGE_SIZE, ++ PCI_DMA_BIDIRECTIONAL); ++ if (pci_dma_mapping_error(nvbe->dev->pdev, nvbe->pagelist[d])) { ++ be->func->clear(be); ++ DRM_ERROR("pci_map_page failed\n"); ++ return -EINVAL; ++ } ++ nvbe->pages_populated = ++d; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++nouveau_sgdma_clear(struct drm_ttm_backend *be) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ int d; ++ ++ DRM_DEBUG("\n"); ++ ++ if (nvbe && nvbe->pagelist) { ++ if (nvbe->is_bound) ++ be->func->unbind(be); ++ ++ for (d = 0; d < nvbe->pages_populated; d++) { ++ pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], ++ NV_CTXDMA_PAGE_SIZE, ++ PCI_DMA_BIDIRECTIONAL); ++ } ++ drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t), ++ DRM_MEM_PAGES); ++ } ++} ++ ++static int ++nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ uint64_t offset = (mem->mm_node->start << PAGE_SHIFT); ++ uint32_t i; ++ ++ DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start, ++ offset, (mem->flags & 
DRM_BO_FLAG_CACHED) == 1); ++ ++ if (offset & NV_CTXDMA_PAGE_MASK) ++ return -EINVAL; ++ nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT); ++ if (dev_priv->card_type < NV_50) ++ nvbe->pte_start += 2; /* skip ctxdma header */ ++ ++ for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) { ++ uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start]; ++ ++ if (pteval & NV_CTXDMA_PAGE_MASK) { ++ DRM_ERROR("Bad pteval 0x%llx\n", pteval); ++ return -EINVAL; ++ } ++ ++ if (dev_priv->card_type < NV_50) { ++ INSTANCE_WR(gpuobj, i, pteval | 3); ++ } else { ++ INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21); ++ INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000); ++ } ++ } ++ ++ nvbe->is_bound = 1; ++ return 0; ++} ++ ++static int ++nouveau_sgdma_unbind(struct drm_ttm_backend *be) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ if (nvbe->is_bound) { ++ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ unsigned int pte; ++ ++ pte = nvbe->pte_start; ++ while (pte < (nvbe->pte_start + nvbe->pages)) { ++ uint64_t pteval = dev_priv->gart_info.sg_dummy_bus; ++ ++ if (dev_priv->card_type < NV_50) { ++ INSTANCE_WR(gpuobj, pte, pteval | 3); ++ } else { ++ INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21); ++ INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000); ++ } ++ ++ pte++; ++ } ++ ++ nvbe->is_bound = 0; ++ } ++ ++ return 0; ++} ++ ++static void ++nouveau_sgdma_destroy(struct drm_ttm_backend *be) ++{ ++ DRM_DEBUG("\n"); ++ if (be) { ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ if (nvbe) { ++ if (nvbe->pagelist) ++ be->func->clear(be); ++ drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM); ++ } ++ } ++} ++ ++static struct drm_ttm_backend_func nouveau_sgdma_backend = { ++ .needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust, ++ .populate = nouveau_sgdma_populate, ++ .clear = nouveau_sgdma_clear, ++ .bind = nouveau_sgdma_bind, ++ .unbind = 
nouveau_sgdma_unbind, ++ .destroy = nouveau_sgdma_destroy ++}; ++ ++struct drm_ttm_backend * ++nouveau_sgdma_init_ttm(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_sgdma_be *nvbe; ++ ++ if (!dev_priv->gart_info.sg_ctxdma) ++ return NULL; ++ ++ nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM); ++ if (!nvbe) ++ return NULL; ++ ++ nvbe->dev = dev; ++ ++ nvbe->backend.func = &nouveau_sgdma_backend; ++ ++ return &nvbe->backend; ++} ++ ++int ++nouveau_sgdma_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ uint32_t aper_size, obj_size; ++ int i, ret; ++ ++ if (dev_priv->card_type < NV_50) { ++ aper_size = (64 * 1024 * 1024); ++ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; ++ obj_size += 8; /* ctxdma header */ ++ } else { ++ /* 1 entire VM page table */ ++ aper_size = (512 * 1024 * 1024); ++ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; ++ } ++ ++ if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, ++ NVOBJ_FLAG_ALLOW_NO_REFS | ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { ++ DRM_ERROR("Error creating sgdma object: %d\n", ret); ++ return ret; ++ } ++ ++ dev_priv->gart_info.sg_dummy_page = ++ alloc_page(GFP_KERNEL|__GFP_DMA32); ++ ++ dev_priv->gart_info.sg_dummy_bus = ++ pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, ++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ++ ++ if (dev_priv->card_type < NV_50) { ++ /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and ++ * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE ++ * on those cards? 
*/ ++ INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | ++ (1 << 12) /* PT present */ | ++ (0 << 13) /* PT *not* linear */ | ++ (NV_DMA_ACCESS_RW << 14) | ++ (NV_DMA_TARGET_PCI << 16)); ++ INSTANCE_WR(gpuobj, 1, aper_size - 1); ++ for (i=2; i<2+(aper_size>>12); i++) { ++ INSTANCE_WR(gpuobj, i, ++ dev_priv->gart_info.sg_dummy_bus | 3); ++ } ++ } else { ++ for (i=0; igart_info.sg_dummy_bus | 0x21); ++ INSTANCE_WR(gpuobj, (i+4)/4, 0); ++ } ++ } ++ ++ dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; ++ dev_priv->gart_info.aper_base = 0; ++ dev_priv->gart_info.aper_size = aper_size; ++ dev_priv->gart_info.sg_ctxdma = gpuobj; ++ return 0; ++} ++ ++void ++nouveau_sgdma_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->gart_info.sg_dummy_page) { ++ pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, ++ NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ++ unlock_page(dev_priv->gart_info.sg_dummy_page); ++ __free_page(dev_priv->gart_info.sg_dummy_page); ++ dev_priv->gart_info.sg_dummy_page = NULL; ++ dev_priv->gart_info.sg_dummy_bus = 0; ++ } ++ ++ nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); ++} ++ ++int ++nouveau_sgdma_nottm_hack_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_ttm_backend *be; ++ struct drm_scatter_gather sgreq; ++ struct drm_mm_node mm_node; ++ struct drm_bo_mem_reg mem; ++ int ret; ++ ++ dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); ++ if (!dev_priv->gart_info.sg_be) ++ return -ENOMEM; ++ be = dev_priv->gart_info.sg_be; ++ ++ /* Hack the aperture size down to the amount of system memory ++ * we're going to bind into it. 
++ */ ++ if (dev_priv->gart_info.aper_size > 32*1024*1024) ++ dev_priv->gart_info.aper_size = 32*1024*1024; ++ ++ sgreq.size = dev_priv->gart_info.aper_size; ++ if ((ret = drm_sg_alloc(dev, &sgreq))) { ++ DRM_ERROR("drm_sg_alloc failed: %d\n", ret); ++ return ret; ++ } ++ dev_priv->gart_info.sg_handle = sgreq.handle; ++ ++ if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) { ++ DRM_ERROR("failed populate: %d\n", ret); ++ return ret; ++ } ++ ++ mm_node.start = 0; ++ mem.mm_node = &mm_node; ++ ++ if ((ret = be->func->bind(be, &mem))) { ++ DRM_ERROR("failed bind: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev) ++{ ++} ++ ++int ++nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ int pte; ++ ++ pte = (offset >> NV_CTXDMA_PAGE_SHIFT); ++ if (dev_priv->card_type < NV_50) { ++ *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; ++ return 0; ++ } ++ ++ DRM_ERROR("Unimplemented on NV50\n"); ++ return -EINVAL; ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c +new file mode 100644 +index 0000000..8d0430f +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_state.c +@@ -0,0 +1,866 @@ ++/* ++ * Copyright 2005 Stephane Marchesin ++ * Copyright 2008 Stuart Bennett ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_sarea.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++static int nouveau_init_card_mappings(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ /* resource 0 is mmio regs */ ++ /* resource 1 is linear FB */ ++ /* resource 2 is RAMIN (mmio regs + 0x1000000) */ ++ /* resource 6 is bios */ ++ ++ /* map the mmio regs */ ++ ret = drm_addmap(dev, drm_get_resource_start(dev, 0), ++ drm_get_resource_len(dev, 0), ++ _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); ++ if (ret) { ++ DRM_ERROR("Unable to initialize the mmio mapping (%d). 
" ++ "Please report your setup to " DRIVER_EMAIL "\n", ++ ret); ++ return -EINVAL; ++ } ++ DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset); ++ ++ /* map larger RAMIN aperture on NV40 cards */ ++ dev_priv->ramin = NULL; ++ if (dev_priv->card_type >= NV_40) { ++ int ramin_resource = 2; ++ if (drm_get_resource_len(dev, ramin_resource) == 0) ++ ramin_resource = 3; ++ ++ ret = drm_addmap(dev, ++ drm_get_resource_start(dev, ramin_resource), ++ drm_get_resource_len(dev, ramin_resource), ++ _DRM_REGISTERS, _DRM_READ_ONLY, ++ &dev_priv->ramin); ++ if (ret) { ++ DRM_ERROR("Failed to init RAMIN mapping, " ++ "limited instance memory available\n"); ++ dev_priv->ramin = NULL; ++ } ++ } ++ ++ /* On older cards (or if the above failed), create a map covering ++ * the BAR0 PRAMIN aperture */ ++ if (!dev_priv->ramin) { ++ ret = drm_addmap(dev, ++ drm_get_resource_start(dev, 0) + NV_RAMIN, ++ (1*1024*1024), ++ _DRM_REGISTERS, _DRM_READ_ONLY, ++ &dev_priv->ramin); ++ if (ret) { ++ DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static int nouveau_stub_init(struct drm_device *dev) { return 0; } ++static void nouveau_stub_takedown(struct drm_device *dev) {} ++ ++static int nouveau_init_engine_ptrs(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ switch (dev_priv->chipset & 0xf0) { ++ case 0x00: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv04_fb_init; ++ 
engine->fb.takedown = nv04_fb_takedown; ++ engine->graph.init = nv04_graph_init; ++ engine->graph.takedown = nv04_graph_takedown; ++ engine->graph.create_context = nv04_graph_create_context; ++ engine->graph.destroy_context = nv04_graph_destroy_context; ++ engine->graph.load_context = nv04_graph_load_context; ++ engine->graph.save_context = nv04_graph_save_context; ++ engine->fifo.channels = 16; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv04_fifo_channel_id; ++ engine->fifo.create_context = nv04_fifo_create_context; ++ engine->fifo.destroy_context = nv04_fifo_destroy_context; ++ engine->fifo.load_context = nv04_fifo_load_context; ++ engine->fifo.save_context = nv04_fifo_save_context; ++ break; ++ case 0x10: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv10_fb_init; ++ engine->fb.takedown = nv10_fb_takedown; ++ engine->graph.init = nv10_graph_init; ++ engine->graph.takedown = nv10_graph_takedown; ++ engine->graph.create_context = nv10_graph_create_context; ++ engine->graph.destroy_context = nv10_graph_destroy_context; ++ engine->graph.load_context = nv10_graph_load_context; ++ engine->graph.save_context = nv10_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv10_fifo_create_context; ++ engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ 
engine->fifo.load_context = nv10_fifo_load_context; ++ engine->fifo.save_context = nv10_fifo_save_context; ++ break; ++ case 0x20: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv10_fb_init; ++ engine->fb.takedown = nv10_fb_takedown; ++ engine->graph.init = nv20_graph_init; ++ engine->graph.takedown = nv20_graph_takedown; ++ engine->graph.create_context = nv20_graph_create_context; ++ engine->graph.destroy_context = nv20_graph_destroy_context; ++ engine->graph.load_context = nv20_graph_load_context; ++ engine->graph.save_context = nv20_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv10_fifo_create_context; ++ engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.load_context = nv10_fifo_load_context; ++ engine->fifo.save_context = nv10_fifo_save_context; ++ break; ++ case 0x30: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv10_fb_init; 
++ engine->fb.takedown = nv10_fb_takedown; ++ engine->graph.init = nv30_graph_init; ++ engine->graph.takedown = nv20_graph_takedown; ++ engine->graph.create_context = nv20_graph_create_context; ++ engine->graph.destroy_context = nv20_graph_destroy_context; ++ engine->graph.load_context = nv20_graph_load_context; ++ engine->graph.save_context = nv20_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv10_fifo_create_context; ++ engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.load_context = nv10_fifo_load_context; ++ engine->fifo.save_context = nv10_fifo_save_context; ++ break; ++ case 0x40: ++ case 0x60: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv40_mc_init; ++ engine->mc.takedown = nv40_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv40_fb_init; ++ engine->fb.takedown = nv40_fb_takedown; ++ engine->graph.init = nv40_graph_init; ++ engine->graph.takedown = nv40_graph_takedown; ++ engine->graph.create_context = nv40_graph_create_context; ++ engine->graph.destroy_context = nv40_graph_destroy_context; ++ engine->graph.load_context = nv40_graph_load_context; ++ engine->graph.save_context = nv40_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nv40_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv40_fifo_create_context; ++ engine->fifo.destroy_context = 
nv40_fifo_destroy_context; ++ engine->fifo.load_context = nv40_fifo_load_context; ++ engine->fifo.save_context = nv40_fifo_save_context; ++ break; ++ case 0x50: ++ case 0x80: /* gotta love NVIDIA's consistency.. */ ++ case 0x90: ++ engine->instmem.init = nv50_instmem_init; ++ engine->instmem.takedown= nv50_instmem_takedown; ++ engine->instmem.populate = nv50_instmem_populate; ++ engine->instmem.clear = nv50_instmem_clear; ++ engine->instmem.bind = nv50_instmem_bind; ++ engine->instmem.unbind = nv50_instmem_unbind; ++ engine->mc.init = nv50_mc_init; ++ engine->mc.takedown = nv50_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nouveau_stub_init; ++ engine->fb.takedown = nouveau_stub_takedown; ++ engine->graph.init = nv50_graph_init; ++ engine->graph.takedown = nv50_graph_takedown; ++ engine->graph.create_context = nv50_graph_create_context; ++ engine->graph.destroy_context = nv50_graph_destroy_context; ++ engine->graph.load_context = nv50_graph_load_context; ++ engine->graph.save_context = nv50_graph_save_context; ++ engine->fifo.channels = 128; ++ engine->fifo.init = nv50_fifo_init; ++ engine->fifo.takedown = nv50_fifo_takedown; ++ engine->fifo.channel_id = nv50_fifo_channel_id; ++ engine->fifo.create_context = nv50_fifo_create_context; ++ engine->fifo.destroy_context = nv50_fifo_destroy_context; ++ engine->fifo.load_context = nv50_fifo_load_context; ++ engine->fifo.save_context = nv50_fifo_save_context; ++ break; ++ default: ++ DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++int ++nouveau_card_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine; ++ int ret; ++ ++ DRM_DEBUG("prev state = %d\n", dev_priv->init_state); ++ ++ if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE) ++ return 0; ++ dev_priv->ttm = 0; ++ ++ /* Determine 
exact chipset we're running on */ ++ if (dev_priv->card_type < NV_10) ++ dev_priv->chipset = dev_priv->card_type; ++ else ++ dev_priv->chipset = ++ (NV_READ(NV03_PMC_BOOT_0) & 0x0ff00000) >> 20; ++ ++ /* Initialise internal driver API hooks */ ++ ret = nouveau_init_engine_ptrs(dev); ++ if (ret) return ret; ++ engine = &dev_priv->Engine; ++ dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; ++ ++ ret = nouveau_gpuobj_early_init(dev); ++ if (ret) return ret; ++ ++ /* Initialise instance memory, must happen before mem_init so we ++ * know exactly how much VRAM we're able to use for "normal" ++ * purposes. ++ */ ++ ret = engine->instmem.init(dev); ++ if (ret) return ret; ++ ++ /* Setup the memory manager */ ++ if (dev_priv->ttm) { ++ ret = nouveau_mem_init_ttm(dev); ++ if (ret) return ret; ++ } else { ++ ret = nouveau_mem_init(dev); ++ if (ret) return ret; ++ } ++ ++ ret = nouveau_gpuobj_init(dev); ++ if (ret) return ret; ++ ++ /* Parse BIOS tables / Run init tables? */ ++ ++ /* PMC */ ++ ret = engine->mc.init(dev); ++ if (ret) return ret; ++ ++ /* PTIMER */ ++ ret = engine->timer.init(dev); ++ if (ret) return ret; ++ ++ /* PFB */ ++ ret = engine->fb.init(dev); ++ if (ret) return ret; ++ ++ /* PGRAPH */ ++ ret = engine->graph.init(dev); ++ if (ret) return ret; ++ ++ /* PFIFO */ ++ ret = engine->fifo.init(dev); ++ if (ret) return ret; ++ ++ /* this call irq_preinstall, register irq handler and ++ * call irq_postinstall ++ */ ++ ret = drm_irq_install(dev); ++ if (ret) return ret; ++ ++ /* what about PVIDEO/PCRTC/PRAMDAC etc? 
*/ ++ ++ ret = nouveau_dma_channel_init(dev); ++ if (ret) return ret; ++ ++ dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; ++ return 0; ++} ++ ++static void nouveau_card_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ DRM_DEBUG("prev state = %d\n", dev_priv->init_state); ++ ++ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { ++ nouveau_dma_channel_takedown(dev); ++ ++ engine->fifo.takedown(dev); ++ engine->graph.takedown(dev); ++ engine->fb.takedown(dev); ++ engine->timer.takedown(dev); ++ engine->mc.takedown(dev); ++ ++ nouveau_sgdma_nottm_hack_takedown(dev); ++ nouveau_sgdma_takedown(dev); ++ ++ nouveau_gpuobj_takedown(dev); ++ nouveau_gpuobj_del(dev, &dev_priv->vm_vram_pt); ++ ++ nouveau_mem_close(dev); ++ engine->instmem.takedown(dev); ++ ++ drm_irq_uninstall(dev); ++ ++ nouveau_gpuobj_late_takedown(dev); ++ ++ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; ++ } ++} ++ ++/* here a client dies, release the stuff that was allocated for its ++ * file_priv */ ++void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nouveau_fifo_cleanup(dev, file_priv); ++ nouveau_mem_release(file_priv,dev_priv->fb_heap); ++ nouveau_mem_release(file_priv,dev_priv->agp_heap); ++ nouveau_mem_release(file_priv,dev_priv->pci_heap); ++} ++ ++/* first module load, setup the mmio/fb mapping */ ++int nouveau_firstopen(struct drm_device *dev) ++{ ++#if defined(__powerpc__) ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct device_node *dn; ++#endif ++ int ret; ++ /* Map any PCI resources we need on the card */ ++ ret = nouveau_init_card_mappings(dev); ++ if (ret) return ret; ++ ++#if defined(__powerpc__) ++ /* Put the card in BE mode if it's not */ ++ if (NV_READ(NV03_PMC_BOOT_1)) ++ NV_WRITE(NV03_PMC_BOOT_1,0x00000001); ++ ++ DRM_MEMORYBARRIER(); ++#endif ++ ++#if 
defined(__linux__) && defined(__powerpc__) ++ /* if we have an OF card, copy vbios to RAMIN */ ++ dn = pci_device_to_OF_node(dev->pdev); ++ if (dn) ++ { ++ int size; ++ const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size); ++ if (bios) ++ { ++ int i; ++ for(i=0;iflags = flags & NOUVEAU_FLAGS; ++ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; ++ ++ DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class); ++ ++ /* Time to determine the card architecture */ ++ regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8); ++ if (!regs) { ++ DRM_ERROR("Could not ioremap to determine register\n"); ++ return -ENOMEM; ++ } ++ ++ reg0 = readl(regs+NV03_PMC_BOOT_0); ++ reg1 = readl(regs+NV03_PMC_BOOT_1); ++#if defined(__powerpc__) ++ if (reg1) ++ reg0=___swab32(reg0); ++#endif ++ ++ /* We're dealing with >=NV10 */ ++ if ((reg0 & 0x0f000000) > 0 ) { ++ /* Bit 27-20 contain the architecture in hex */ ++ architecture = (reg0 & 0xff00000) >> 20; ++ /* NV04 or NV05 */ ++ } else if ((reg0 & 0xff00fff0) == 0x20004000) { ++ architecture = 0x04; ++ } ++ ++ iounmap(regs); ++ ++ if (architecture >= 0x80) { ++ dev_priv->card_type = NV_50; ++ } else if (architecture >= 0x60) { ++ /* FIXME we need to figure out who's who for NV6x */ ++ dev_priv->card_type = NV_44; ++ } else if (architecture >= 0x50) { ++ dev_priv->card_type = NV_50; ++ } else if (architecture >= 0x40) { ++ uint8_t subarch = architecture & 0xf; ++ /* Selection criteria borrowed from NV40EXA */ ++ if (NV40_CHIPSET_MASK & (1 << subarch)) { ++ dev_priv->card_type = NV_40; ++ } else if (NV44_CHIPSET_MASK & (1 << subarch)) { ++ dev_priv->card_type = NV_44; ++ } else { ++ dev_priv->card_type = NV_UNKNOWN; ++ } ++ } else if (architecture >= 0x30) { ++ dev_priv->card_type = NV_30; ++ } else if (architecture >= 0x20) { ++ dev_priv->card_type = NV_20; ++ } else if (architecture >= 0x17) { ++ dev_priv->card_type = NV_17; ++ } else if (architecture >= 0x11) { ++ 
dev_priv->card_type = NV_11; ++ } else if (architecture >= 0x10) { ++ dev_priv->card_type = NV_10; ++ } else if (architecture >= 0x04) { ++ dev_priv->card_type = NV_04; ++ } else { ++ dev_priv->card_type = NV_UNKNOWN; ++ } ++ ++ DRM_INFO("Detected an NV%d generation card (0x%08x)\n", dev_priv->card_type,reg0); ++ ++ if (dev_priv->card_type == NV_UNKNOWN) { ++ return -EINVAL; ++ } ++ ++ /* Special flags */ ++ if (dev->pci_device == 0x01a0) { ++ dev_priv->flags |= NV_NFORCE; ++ } else if (dev->pci_device == 0x01f0) { ++ dev_priv->flags |= NV_NFORCE2; ++ } ++ ++ dev->dev_private = (void *)dev_priv; ++ ++ return 0; ++} ++ ++void nouveau_lastclose(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* In the case of an error dev_priv may not be be allocated yet */ ++ if (dev_priv && dev_priv->card_type) { ++ nouveau_card_takedown(dev); ++ ++ if(dev_priv->fb_mtrr>0) ++ { ++ drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); ++ dev_priv->fb_mtrr=0; ++ } ++ } ++} ++ ++int nouveau_unload(struct drm_device *dev) ++{ ++ drm_free(dev->dev_private, sizeof(*dev->dev_private), DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ return 0; ++} ++ ++int ++nouveau_ioctl_card_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return nouveau_card_init(dev); ++} ++ ++int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_getparam *getparam = data; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ switch (getparam->param) { ++ case NOUVEAU_GETPARAM_CHIPSET_ID: ++ getparam->value = dev_priv->chipset; ++ break; ++ case NOUVEAU_GETPARAM_PCI_VENDOR: ++ getparam->value=dev->pci_vendor; ++ break; ++ case NOUVEAU_GETPARAM_PCI_DEVICE: ++ getparam->value=dev->pci_device; ++ break; ++ case NOUVEAU_GETPARAM_BUS_TYPE: ++ if (drm_device_is_agp(dev)) ++ 
getparam->value=NV_AGP; ++ else if (drm_device_is_pcie(dev)) ++ getparam->value=NV_PCIE; ++ else ++ getparam->value=NV_PCI; ++ break; ++ case NOUVEAU_GETPARAM_FB_PHYSICAL: ++ getparam->value=dev_priv->fb_phys; ++ break; ++ case NOUVEAU_GETPARAM_AGP_PHYSICAL: ++ getparam->value=dev_priv->gart_info.aper_base; ++ break; ++ case NOUVEAU_GETPARAM_PCI_PHYSICAL: ++ if ( dev -> sg ) ++ getparam->value=(unsigned long)dev->sg->virtual; ++ else ++ { ++ DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n"); ++ return -EINVAL; ++ } ++ break; ++ case NOUVEAU_GETPARAM_FB_SIZE: ++ getparam->value=dev_priv->fb_available_size; ++ break; ++ case NOUVEAU_GETPARAM_AGP_SIZE: ++ getparam->value=dev_priv->gart_info.aper_size; ++ break; ++ default: ++ DRM_ERROR("unknown parameter %lld\n", getparam->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_setparam *setparam = data; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ switch (setparam->param) { ++ case NOUVEAU_SETPARAM_CMDBUF_LOCATION: ++ switch (setparam->value) { ++ case NOUVEAU_MEM_AGP: ++ case NOUVEAU_MEM_FB: ++ case NOUVEAU_MEM_PCI: ++ case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE: ++ break; ++ default: ++ DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n", ++ setparam->value); ++ return -EINVAL; ++ } ++ dev_priv->config.cmdbuf.location = setparam->value; ++ break; ++ case NOUVEAU_SETPARAM_CMDBUF_SIZE: ++ dev_priv->config.cmdbuf.size = setparam->value; ++ break; ++ default: ++ DRM_ERROR("unknown parameter %lld\n", setparam->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* waits for idle */ ++void nouveau_wait_for_idle(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ switch(dev_priv->card_type) { ++ case NV_50: ++ break; ++ default: { ++ /* This stuff is more or less a copy of 
what is seen ++ * in nv28 kmmio dump. ++ */ ++ uint64_t started = dev_priv->Engine.timer.read(dev); ++ uint64_t stopped = started; ++ uint32_t status; ++ do { ++ uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE); ++ (void)pmc_e; ++ status = NV_READ(NV04_PGRAPH_STATUS); ++ if (!status) ++ break; ++ stopped = dev_priv->Engine.timer.read(dev); ++ /* It'll never wrap anyway... */ ++ } while (stopped - started < 1000000000ULL); ++ if (status) ++ DRM_ERROR("timed out with status 0x%08x\n", ++ status); ++ } ++ } ++} ++ ++static int nouveau_suspend(struct drm_device *dev) ++{ ++ struct mem_block *p; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_suspend_resume *susres = &dev_priv->susres; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int i; ++ ++ drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER); ++ susres->ramin_size = 0; ++ list_for_each(p, dev_priv->ramin_heap) ++ if (p->file_priv && (p->start + p->size) > susres->ramin_size) ++ susres->ramin_size = p->start + p->size; ++ if (!(susres->ramin_copy = drm_alloc(susres->ramin_size, DRM_MEM_DRIVER))) { ++ DRM_ERROR("Couldn't alloc RAMIN backing for suspend\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < engine->fifo.channels; i++) { ++ uint64_t t_start = engine->timer.read(dev); ++ ++ if (dev_priv->fifos[i] == NULL) ++ continue; ++ ++ /* Give the channel a chance to idle, wait 2s (hopefully) */ ++ while (!nouveau_channel_idle(dev_priv->fifos[i])) ++ if (engine->timer.read(dev) - t_start > 2000000000ULL) { ++ DRM_ERROR("Failed to idle channel %d before" ++ "suspend.", dev_priv->fifos[i]->id); ++ return -EBUSY; ++ } ++ } ++ nouveau_wait_for_idle(dev); ++ ++ NV_WRITE(NV04_PGRAPH_FIFO, 0); ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ susres->fifo_mode = 
NV_READ(NV04_PFIFO_MODE); ++ ++ if (dev_priv->card_type >= NV_10) { ++ susres->graph_state = NV_READ(NV10_PGRAPH_STATE); ++ susres->graph_ctx_control = NV_READ(NV10_PGRAPH_CTX_CONTROL); ++ } else { ++ susres->graph_state = NV_READ(NV04_PGRAPH_STATE); ++ susres->graph_ctx_control = NV_READ(NV04_PGRAPH_CTX_CONTROL); ++ } ++ ++ engine->fifo.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]); ++ engine->graph.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]); ++ nouveau_wait_for_idle(dev); ++ ++ for (i = 0; i < susres->ramin_size / 4; i++) ++ susres->ramin_copy[i] = NV_RI32(i << 2); ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ NV_WRITE(NV04_PGRAPH_FIFO, 1); ++ ++ return 0; ++} ++ ++static int nouveau_resume(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_suspend_resume *susres = &dev_priv->susres; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int i; ++ ++ if (!susres->ramin_copy) ++ return -EINVAL; ++ ++ DRM_DEBUG("Doing resume\n"); ++ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { ++ struct drm_agp_info info; ++ struct drm_agp_mode mode; ++ ++ /* agp bridge drivers don't re-enable agp on resume. lame. 
*/ ++ if ((i = drm_agp_info(dev, &info))) { ++ DRM_ERROR("Unable to get AGP info: %d\n", i); ++ return i; ++ } ++ mode.mode = info.mode; ++ if ((i = drm_agp_enable(dev, mode))) { ++ DRM_ERROR("Unable to enable AGP: %d\n", i); ++ return i; ++ } ++ } ++ ++ for (i = 0; i < susres->ramin_size / 4; i++) ++ NV_WI32(i << 2, susres->ramin_copy[i]); ++ ++ engine->mc.init(dev); ++ engine->timer.init(dev); ++ engine->fb.init(dev); ++ engine->graph.init(dev); ++ engine->fifo.init(dev); ++ ++ NV_WRITE(NV04_PGRAPH_FIFO, 0); ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ /* PMC power cycling PFIFO in init clobbers some of the stuff stored in ++ * PRAMIN (such as NV04_PFIFO_CACHE1_DMA_INSTANCE). this is unhelpful ++ */ ++ for (i = 0; i < susres->ramin_size / 4; i++) ++ NV_WI32(i << 2, susres->ramin_copy[i]); ++ ++ engine->fifo.load_context(dev_priv->fifos[0]); ++ NV_WRITE(NV04_PFIFO_MODE, susres->fifo_mode); ++ ++ engine->graph.load_context(dev_priv->fifos[0]); ++ nouveau_wait_for_idle(dev); ++ ++ if (dev_priv->card_type >= NV_10) { ++ NV_WRITE(NV10_PGRAPH_STATE, susres->graph_state); ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, susres->graph_ctx_control); ++ } else { ++ NV_WRITE(NV04_PGRAPH_STATE, susres->graph_state); ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, susres->graph_ctx_control); ++ } ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ NV_WRITE(NV04_PGRAPH_FIFO, 0x1); ++ ++ if (dev->irq_enabled) ++ nouveau_irq_postinstall(dev); ++ ++ drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER); ++ susres->ramin_copy = NULL; ++ susres->ramin_size = 0; 
++ ++ return 0; ++} ++ ++int nouveau_ioctl_suspend(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ return nouveau_suspend(dev); ++} ++ ++int nouveau_ioctl_resume(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ return nouveau_resume(dev); ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_swmthd.c b/drivers/gpu/drm/nouveau/nouveau_swmthd.c +new file mode 100644 +index 0000000..c3666bf +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_swmthd.c +@@ -0,0 +1,191 @@ ++/* ++ * Copyright (C) 2007 Arthur Huillet. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++/* ++ * Authors: ++ * Arthur Huillet ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_reg.h" ++ ++/*TODO: add a "card_type" attribute*/ ++typedef struct{ ++ uint32_t oclass; /* object class for this software method */ ++ uint32_t mthd; /* method number */ ++ void (*method_code)(struct drm_device *dev, uint32_t oclass, uint32_t mthd); /* pointer to the function that does the work */ ++ } nouveau_software_method_t; ++ ++ ++ /* This function handles the NV04 setcontext software methods. ++One function for all because they are very similar.*/ ++static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t oclass, uint32_t mthd) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst_loc = NV_READ(NV04_PGRAPH_CTX_SWITCH4) & 0xFFFF; ++ uint32_t value_to_set = 0, bit_to_set = 0; ++ ++ switch ( oclass ) { ++ case 0x4a: ++ switch ( mthd ) { ++ case 0x188 : ++ case 0x18c : ++ bit_to_set = 0; ++ break; ++ case 0x198 : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ default : ; ++ }; ++ break; ++ case 0x5c: ++ switch ( mthd ) { ++ case 0x184: ++ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ ++ break; ++ case 0x188: ++ case 0x18c: ++ bit_to_set = 0; ++ break; ++ case 0x198: ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ }; ++ break; ++ case 0x5f: ++ switch ( mthd ) { ++ case 0x184 : ++ bit_to_set = 1 << 12; /*CHROMA_KEY_ENABLE*/ ++ break; ++ case 0x188 : ++ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ ++ break; ++ case 0x18c : ++ case 0x190 : ++ bit_to_set = 0; ++ break; ++ case 0x19c : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = 
NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ }; ++ break; ++ case 0x61: ++ switch ( mthd ) { ++ case 0x188 : ++ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ ++ break; ++ case 0x18c : ++ case 0x190 : ++ bit_to_set = 0; ++ break; ++ case 0x19c : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ }; ++ break; ++ case 0x77: ++ switch ( mthd ) { ++ case 0x198 : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x304 : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; //PATCH_CONFIG ++ break; ++ }; ++ break; ++ default :; ++ }; ++ ++ value_to_set = (NV_READ(0x00700000 | inst_loc << 4))| bit_to_set; ++ ++ /*RAMIN*/ ++ nouveau_wait_for_idle(dev); ++ NV_WRITE(0x00700000 | inst_loc << 4, value_to_set); ++ ++ /*DRM_DEBUG("CTX_SWITCH1 value is %#x\n", NV_READ(NV04_PGRAPH_CTX_SWITCH1));*/ ++ NV_WRITE(NV04_PGRAPH_CTX_SWITCH1, value_to_set); ++ ++ /*DRM_DEBUG("CTX_CACHE1 + xxx value is %#x\n", NV_READ(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2)));*/ ++ NV_WRITE(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2), value_to_set); ++} ++ ++ nouveau_software_method_t nouveau_sw_methods[] = { ++ /*NV04 context software methods*/ ++ { 0x4a, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x4a, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x4a, 0x198, nouveau_NV04_setcontext_sw_method }, ++ { 0x4a, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x184, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x198, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x184, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x188, 
nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x190, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x19c, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x190, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x19c, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x77, 0x198, nouveau_NV04_setcontext_sw_method }, ++ { 0x77, 0x304, nouveau_NV04_setcontext_sw_method }, ++ /*terminator*/ ++ { 0x0, 0x0, NULL, }, ++ }; ++ ++ int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method) { ++ int i = 0; ++ while ( nouveau_sw_methods[ i ] . method_code != NULL ) ++ { ++ if ( nouveau_sw_methods[ i ] . oclass == oclass && nouveau_sw_methods[ i ] . mthd == method ) ++ { ++ nouveau_sw_methods[ i ] . method_code(dev, oclass, method); ++ return 0; ++ } ++ i ++; ++ } ++ ++ return 1; ++ } +diff --git a/drivers/gpu/drm/nouveau/nouveau_swmthd.h b/drivers/gpu/drm/nouveau/nouveau_swmthd.h +new file mode 100644 +index 0000000..5b9409f +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nouveau_swmthd.h +@@ -0,0 +1,33 @@ ++/* ++ * Copyright (C) 2007 Arthur Huillet. ++ * ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++/* ++ * Authors: ++ * Arthur Huillet ++ */ ++ ++int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */ +diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c +new file mode 100644 +index 0000000..58a9247 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv04_fb.c +@@ -0,0 +1,23 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv04_fb_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows ++ * nvidia reading PFB_CFG_0, then writing back its original value. 
++ * (which was 0x701114 in this case) ++ */ ++ NV_WRITE(NV04_PFB_CFG0, 0x1114); ++ ++ return 0; ++} ++ ++void ++nv04_fb_takedown(struct drm_device *dev) ++{ ++} +diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c +new file mode 100644 +index 0000000..88186fe +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv04_fifo.c +@@ -0,0 +1,138 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \ ++ NV04_RAMFC_##offset/4, (val)) ++#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ ++ NV04_RAMFC_##offset/4) ++#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE)) ++#define NV04_RAMFC__SIZE 32 ++ ++int ++nv04_fifo_channel_id(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & ++ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK); ++} ++ ++int ++nv04_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, ++ NV04_RAMFC__SIZE, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ NULL, &chan->ramfc))) ++ return ret; ++ ++ /* Setup initial state */ ++ RAMFC_WR(DMA_PUT, chan->pushbuf_base); ++ RAMFC_WR(DMA_GET, chan->pushbuf_base); ++ RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); ++ RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0)); ++ ++ /* enable the fifo dma operation */ ++ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE) | (1<id)); ++ return 0; ++} ++ ++void ++nv04_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<id)); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv04_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, ++ 
NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); ++ ++ tmp = RAMFC_RD(DMA_INSTANCE); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH)); ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE)); ++ ++ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); ++ ++ return 0; ++} ++ ++int ++nv04_fifo_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ RAMFC_WR(DMA_PUT, NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ RAMFC_WR(DMA_GET, NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; ++ tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE); ++ RAMFC_WR(DMA_INSTANCE, tmp); ++ ++ RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); ++ RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); ++ RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE)); ++ RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1)); ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c +new file mode 100644 +index 0000000..6caae25 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv04_graph.c +@@ -0,0 +1,516 @@ ++/* ++ * Copyright 2007 Stephane Marchesin ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++ ++static uint32_t nv04_graph_ctx_regs [] = { ++ NV04_PGRAPH_CTX_SWITCH1, ++ NV04_PGRAPH_CTX_SWITCH2, ++ NV04_PGRAPH_CTX_SWITCH3, ++ NV04_PGRAPH_CTX_SWITCH4, ++ NV04_PGRAPH_CTX_CACHE1, ++ NV04_PGRAPH_CTX_CACHE2, ++ NV04_PGRAPH_CTX_CACHE3, ++ NV04_PGRAPH_CTX_CACHE4, ++ 0x00400184, ++ 0x004001a4, ++ 0x004001c4, ++ 0x004001e4, ++ 0x00400188, ++ 0x004001a8, ++ 0x004001c8, ++ 0x004001e8, ++ 0x0040018c, ++ 0x004001ac, ++ 0x004001cc, ++ 0x004001ec, ++ 0x00400190, ++ 0x004001b0, ++ 0x004001d0, ++ 0x004001f0, ++ 0x00400194, ++ 0x004001b4, ++ 0x004001d4, ++ 0x004001f4, ++ 0x00400198, ++ 0x004001b8, ++ 0x004001d8, ++ 0x004001f8, ++ 0x0040019c, ++ 0x004001bc, ++ 0x004001dc, ++ 0x004001fc, ++ 0x00400174, ++ NV04_PGRAPH_DMA_START_0, ++ NV04_PGRAPH_DMA_START_1, ++ NV04_PGRAPH_DMA_LENGTH, ++ NV04_PGRAPH_DMA_MISC, ++ NV04_PGRAPH_DMA_PITCH, ++ NV04_PGRAPH_BOFFSET0, ++ NV04_PGRAPH_BBASE0, ++ NV04_PGRAPH_BLIMIT0, ++ NV04_PGRAPH_BOFFSET1, ++ NV04_PGRAPH_BBASE1, ++ NV04_PGRAPH_BLIMIT1, ++ NV04_PGRAPH_BOFFSET2, ++ NV04_PGRAPH_BBASE2, ++ NV04_PGRAPH_BLIMIT2, ++ NV04_PGRAPH_BOFFSET3, ++ NV04_PGRAPH_BBASE3, ++ NV04_PGRAPH_BLIMIT3, ++ NV04_PGRAPH_BOFFSET4, ++ NV04_PGRAPH_BBASE4, ++ NV04_PGRAPH_BLIMIT4, ++ NV04_PGRAPH_BOFFSET5, ++ NV04_PGRAPH_BBASE5, ++ NV04_PGRAPH_BLIMIT5, ++ NV04_PGRAPH_BPITCH0, ++ NV04_PGRAPH_BPITCH1, ++ NV04_PGRAPH_BPITCH2, ++ NV04_PGRAPH_BPITCH3, ++ NV04_PGRAPH_BPITCH4, ++ NV04_PGRAPH_SURFACE, ++ NV04_PGRAPH_STATE, ++ NV04_PGRAPH_BSWIZZLE2, ++ NV04_PGRAPH_BSWIZZLE5, ++ NV04_PGRAPH_BPIXEL, ++ NV04_PGRAPH_NOTIFY, ++ NV04_PGRAPH_PATT_COLOR0, ++ NV04_PGRAPH_PATT_COLOR1, ++ NV04_PGRAPH_PATT_COLORRAM+0x00, ++ NV04_PGRAPH_PATT_COLORRAM+0x01, ++ NV04_PGRAPH_PATT_COLORRAM+0x02, ++ NV04_PGRAPH_PATT_COLORRAM+0x03, ++ NV04_PGRAPH_PATT_COLORRAM+0x04, ++ NV04_PGRAPH_PATT_COLORRAM+0x05, ++ NV04_PGRAPH_PATT_COLORRAM+0x06, ++ NV04_PGRAPH_PATT_COLORRAM+0x07, ++ 
NV04_PGRAPH_PATT_COLORRAM+0x08, ++ NV04_PGRAPH_PATT_COLORRAM+0x09, ++ NV04_PGRAPH_PATT_COLORRAM+0x0A, ++ NV04_PGRAPH_PATT_COLORRAM+0x0B, ++ NV04_PGRAPH_PATT_COLORRAM+0x0C, ++ NV04_PGRAPH_PATT_COLORRAM+0x0D, ++ NV04_PGRAPH_PATT_COLORRAM+0x0E, ++ NV04_PGRAPH_PATT_COLORRAM+0x0F, ++ NV04_PGRAPH_PATT_COLORRAM+0x10, ++ NV04_PGRAPH_PATT_COLORRAM+0x11, ++ NV04_PGRAPH_PATT_COLORRAM+0x12, ++ NV04_PGRAPH_PATT_COLORRAM+0x13, ++ NV04_PGRAPH_PATT_COLORRAM+0x14, ++ NV04_PGRAPH_PATT_COLORRAM+0x15, ++ NV04_PGRAPH_PATT_COLORRAM+0x16, ++ NV04_PGRAPH_PATT_COLORRAM+0x17, ++ NV04_PGRAPH_PATT_COLORRAM+0x18, ++ NV04_PGRAPH_PATT_COLORRAM+0x19, ++ NV04_PGRAPH_PATT_COLORRAM+0x1A, ++ NV04_PGRAPH_PATT_COLORRAM+0x1B, ++ NV04_PGRAPH_PATT_COLORRAM+0x1C, ++ NV04_PGRAPH_PATT_COLORRAM+0x1D, ++ NV04_PGRAPH_PATT_COLORRAM+0x1E, ++ NV04_PGRAPH_PATT_COLORRAM+0x1F, ++ NV04_PGRAPH_PATT_COLORRAM+0x20, ++ NV04_PGRAPH_PATT_COLORRAM+0x21, ++ NV04_PGRAPH_PATT_COLORRAM+0x22, ++ NV04_PGRAPH_PATT_COLORRAM+0x23, ++ NV04_PGRAPH_PATT_COLORRAM+0x24, ++ NV04_PGRAPH_PATT_COLORRAM+0x25, ++ NV04_PGRAPH_PATT_COLORRAM+0x26, ++ NV04_PGRAPH_PATT_COLORRAM+0x27, ++ NV04_PGRAPH_PATT_COLORRAM+0x28, ++ NV04_PGRAPH_PATT_COLORRAM+0x29, ++ NV04_PGRAPH_PATT_COLORRAM+0x2A, ++ NV04_PGRAPH_PATT_COLORRAM+0x2B, ++ NV04_PGRAPH_PATT_COLORRAM+0x2C, ++ NV04_PGRAPH_PATT_COLORRAM+0x2D, ++ NV04_PGRAPH_PATT_COLORRAM+0x2E, ++ NV04_PGRAPH_PATT_COLORRAM+0x2F, ++ NV04_PGRAPH_PATT_COLORRAM+0x30, ++ NV04_PGRAPH_PATT_COLORRAM+0x31, ++ NV04_PGRAPH_PATT_COLORRAM+0x32, ++ NV04_PGRAPH_PATT_COLORRAM+0x33, ++ NV04_PGRAPH_PATT_COLORRAM+0x34, ++ NV04_PGRAPH_PATT_COLORRAM+0x35, ++ NV04_PGRAPH_PATT_COLORRAM+0x36, ++ NV04_PGRAPH_PATT_COLORRAM+0x37, ++ NV04_PGRAPH_PATT_COLORRAM+0x38, ++ NV04_PGRAPH_PATT_COLORRAM+0x39, ++ NV04_PGRAPH_PATT_COLORRAM+0x3A, ++ NV04_PGRAPH_PATT_COLORRAM+0x3B, ++ NV04_PGRAPH_PATT_COLORRAM+0x3C, ++ NV04_PGRAPH_PATT_COLORRAM+0x3D, ++ NV04_PGRAPH_PATT_COLORRAM+0x3E, ++ NV04_PGRAPH_PATT_COLORRAM+0x3F, ++ NV04_PGRAPH_PATTERN, ++ 0x0040080c, ++ 
NV04_PGRAPH_PATTERN_SHAPE, ++ 0x00400600, ++ NV04_PGRAPH_ROP3, ++ NV04_PGRAPH_CHROMA, ++ NV04_PGRAPH_BETA_AND, ++ NV04_PGRAPH_BETA_PREMULT, ++ NV04_PGRAPH_CONTROL0, ++ NV04_PGRAPH_CONTROL1, ++ NV04_PGRAPH_CONTROL2, ++ NV04_PGRAPH_BLEND, ++ NV04_PGRAPH_STORED_FMT, ++ NV04_PGRAPH_SOURCE_COLOR, ++ 0x00400560, ++ 0x00400568, ++ 0x00400564, ++ 0x0040056c, ++ 0x00400400, ++ 0x00400480, ++ 0x00400404, ++ 0x00400484, ++ 0x00400408, ++ 0x00400488, ++ 0x0040040c, ++ 0x0040048c, ++ 0x00400410, ++ 0x00400490, ++ 0x00400414, ++ 0x00400494, ++ 0x00400418, ++ 0x00400498, ++ 0x0040041c, ++ 0x0040049c, ++ 0x00400420, ++ 0x004004a0, ++ 0x00400424, ++ 0x004004a4, ++ 0x00400428, ++ 0x004004a8, ++ 0x0040042c, ++ 0x004004ac, ++ 0x00400430, ++ 0x004004b0, ++ 0x00400434, ++ 0x004004b4, ++ 0x00400438, ++ 0x004004b8, ++ 0x0040043c, ++ 0x004004bc, ++ 0x00400440, ++ 0x004004c0, ++ 0x00400444, ++ 0x004004c4, ++ 0x00400448, ++ 0x004004c8, ++ 0x0040044c, ++ 0x004004cc, ++ 0x00400450, ++ 0x004004d0, ++ 0x00400454, ++ 0x004004d4, ++ 0x00400458, ++ 0x004004d8, ++ 0x0040045c, ++ 0x004004dc, ++ 0x00400460, ++ 0x004004e0, ++ 0x00400464, ++ 0x004004e4, ++ 0x00400468, ++ 0x004004e8, ++ 0x0040046c, ++ 0x004004ec, ++ 0x00400470, ++ 0x004004f0, ++ 0x00400474, ++ 0x004004f4, ++ 0x00400478, ++ 0x004004f8, ++ 0x0040047c, ++ 0x004004fc, ++ 0x0040053c, ++ 0x00400544, ++ 0x00400540, ++ 0x00400548, ++ 0x00400560, ++ 0x00400568, ++ 0x00400564, ++ 0x0040056c, ++ 0x00400534, ++ 0x00400538, ++ 0x00400514, ++ 0x00400518, ++ 0x0040051c, ++ 0x00400520, ++ 0x00400524, ++ 0x00400528, ++ 0x0040052c, ++ 0x00400530, ++ 0x00400d00, ++ 0x00400d40, ++ 0x00400d80, ++ 0x00400d04, ++ 0x00400d44, ++ 0x00400d84, ++ 0x00400d08, ++ 0x00400d48, ++ 0x00400d88, ++ 0x00400d0c, ++ 0x00400d4c, ++ 0x00400d8c, ++ 0x00400d10, ++ 0x00400d50, ++ 0x00400d90, ++ 0x00400d14, ++ 0x00400d54, ++ 0x00400d94, ++ 0x00400d18, ++ 0x00400d58, ++ 0x00400d98, ++ 0x00400d1c, ++ 0x00400d5c, ++ 0x00400d9c, ++ 0x00400d20, ++ 0x00400d60, ++ 0x00400da0, ++ 
0x00400d24, ++ 0x00400d64, ++ 0x00400da4, ++ 0x00400d28, ++ 0x00400d68, ++ 0x00400da8, ++ 0x00400d2c, ++ 0x00400d6c, ++ 0x00400dac, ++ 0x00400d30, ++ 0x00400d70, ++ 0x00400db0, ++ 0x00400d34, ++ 0x00400d74, ++ 0x00400db4, ++ 0x00400d38, ++ 0x00400d78, ++ 0x00400db8, ++ 0x00400d3c, ++ 0x00400d7c, ++ 0x00400dbc, ++ 0x00400590, ++ 0x00400594, ++ 0x00400598, ++ 0x0040059c, ++ 0x004005a8, ++ 0x004005ac, ++ 0x004005b0, ++ 0x004005b4, ++ 0x004005c0, ++ 0x004005c4, ++ 0x004005c8, ++ 0x004005cc, ++ 0x004005d0, ++ 0x004005d4, ++ 0x004005d8, ++ 0x004005dc, ++ 0x004005e0, ++ NV04_PGRAPH_PASSTHRU_0, ++ NV04_PGRAPH_PASSTHRU_1, ++ NV04_PGRAPH_PASSTHRU_2, ++ NV04_PGRAPH_DVD_COLORFMT, ++ NV04_PGRAPH_SCALED_FORMAT, ++ NV04_PGRAPH_MISC24_0, ++ NV04_PGRAPH_MISC24_1, ++ NV04_PGRAPH_MISC24_2, ++ 0x00400500, ++ 0x00400504, ++ NV04_PGRAPH_VALID1, ++ NV04_PGRAPH_VALID2 ++ ++ ++}; ++ ++struct graph_state { ++ int nv04[sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0])]; ++}; ++ ++void nouveau_nv04_context_switch(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_channel *next, *last; ++ int chid; ++ ++ if (!dev) { ++ DRM_DEBUG("Invalid drm_device\n"); ++ return; ++ } ++ dev_priv = dev->dev_private; ++ if (!dev_priv) { ++ DRM_DEBUG("Invalid drm_nouveau_private\n"); ++ return; ++ } ++ if (!dev_priv->fifos) { ++ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n"); ++ return; ++ } ++ ++ chid = engine->fifo.channel_id(dev); ++ next = dev_priv->fifos[chid]; ++ ++ if (!next) { ++ DRM_DEBUG("Invalid next channel\n"); ++ return; ++ } ++ ++ chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1); ++ last = dev_priv->fifos[chid]; ++ ++ if (!last) { ++ DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n", ++ next->id); ++ } else { ++ DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n", ++ last->id, next->id); ++ } ++ ++/* NV_WRITE(NV03_PFIFO_CACHES, 
0x0); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);*/ ++ NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ ++ if (last) ++ nv04_graph_save_context(last); ++ ++ nouveau_wait_for_idle(dev); ++ ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000); ++ NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24)); ++ ++ nouveau_wait_for_idle(dev); ++ ++ nv04_graph_load_context(next); ++ ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100); ++ NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24); ++ NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF); ++ ++/* NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x1); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x1);*/ ++ NV_WRITE(NV04_PGRAPH_FIFO,0x1); ++} ++ ++int nv04_graph_create_context(struct nouveau_channel *chan) { ++ struct graph_state* pgraph_ctx; ++ DRM_DEBUG("nv04_graph_context_create %d\n", chan->id); ++ ++ chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx), ++ DRM_MEM_DRIVER); ++ ++ if (pgraph_ctx == NULL) ++ return -ENOMEM; ++ ++ //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; ++ pgraph_ctx->nv04[0] = 0x0001ffff; ++ /* is it really needed ??? 
*/ ++ //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4); ++ //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0); ++ ++ return 0; ++} ++ ++void nv04_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ ++ drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER); ++ chan->pgraph_ctx = NULL; ++} ++ ++int nv04_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) ++ NV_WRITE(nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); ++ ++ return 0; ++} ++ ++int nv04_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) ++ pgraph_ctx->nv04[i] = NV_READ(nv04_graph_ctx_regs[i]); ++ ++ return 0; ++} ++ ++int nv04_graph_init(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ /* Enable PGRAPH interrupts */ ++ NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_VALID1, 0); ++ NV_WRITE(NV04_PGRAPH_VALID2, 0); ++ /*NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000); ++ /*1231C000 blob, 001 haiku*/ ++ //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111100); ++ /*0x72111100 blob , 01 haiku*/ ++ /*NV_WRITE(NV04_PGRAPH_DEBUG_2, 
0x11d5f870);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f071); ++ /*haiku same*/ ++ ++ /*NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf0d4ff31); ++ /*haiku and blob 10d4*/ ++ ++ NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ /* These don't belong here, they're part of a per-channel context */ ++ NV_WRITE(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_BETA_AND , 0xFFFFFFFF); ++ ++ return 0; ++} ++ ++void nv04_graph_takedown(struct drm_device *dev) ++{ ++} +diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c +new file mode 100644 +index 0000000..804f9a7 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv04_instmem.c +@@ -0,0 +1,159 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++static void ++nv04_instmem_determine_amount(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ /* Figure out how much instance memory we need */ ++ if (dev_priv->card_type >= NV_40) { ++ /* We'll want more instance memory than this on some NV4x cards. ++ * There's a 16MB aperture to play with that maps onto the end ++ * of vram. For now, only reserve a small piece until we know ++ * more about what each chipset requires. 
++ */ ++ dev_priv->ramin_rsvd_vram = (1*1024* 1024); ++ } else { ++ /*XXX: what *are* the limits on ramin_rsvd_vram = (512*1024); ++ } ++ DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10); ++ ++ /* Clear all of it, except the BIOS image that's in the first 64KiB */ ++ for (i=(64*1024); iramin_rsvd_vram; i+=4) ++ NV_WI32(i, 0x00000000); ++} ++ ++static void ++nv04_instmem_configure_fixed_tables(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ /* FIFO hash table (RAMHT) ++ * use 4k hash table at RAMIN+0x10000 ++ * TODO: extend the hash table ++ */ ++ dev_priv->ramht_offset = 0x10000; ++ dev_priv->ramht_bits = 9; ++ dev_priv->ramht_size = (1 << dev_priv->ramht_bits); ++ DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset, ++ dev_priv->ramht_size); ++ ++ /* FIFO runout table (RAMRO) - 512k at 0x11200 */ ++ dev_priv->ramro_offset = 0x11200; ++ dev_priv->ramro_size = 512; ++ DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset, ++ dev_priv->ramro_size); ++ ++ /* FIFO context table (RAMFC) ++ * NV40 : Not sure exactly how to position RAMFC on some cards, ++ * 0x30002 seems to position it at RAMIN+0x20000 on these ++ * cards. RAMFC is 4kb (32 fifos, 128byte entries). 
++ * Others: Position RAMFC at RAMIN+0x11400 ++ */ ++ switch(dev_priv->card_type) ++ { ++ case NV_40: ++ case NV_44: ++ dev_priv->ramfc_offset = 0x20000; ++ dev_priv->ramfc_size = engine->fifo.channels * ++ nouveau_fifo_ctx_size(dev); ++ break; ++ case NV_30: ++ case NV_20: ++ case NV_17: ++ case NV_11: ++ case NV_10: ++ case NV_04: ++ default: ++ dev_priv->ramfc_offset = 0x11400; ++ dev_priv->ramfc_size = engine->fifo.channels * ++ nouveau_fifo_ctx_size(dev); ++ break; ++ } ++ DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset, ++ dev_priv->ramfc_size); ++} ++ ++int nv04_instmem_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t offset; ++ int ret = 0; ++ ++ nv04_instmem_determine_amount(dev); ++ nv04_instmem_configure_fixed_tables(dev); ++ ++ /* Create a heap to manage RAMIN allocations, we don't allocate ++ * the space that was reserved for RAMHT/FC/RO. ++ */ ++ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; ++ ++ /* On my NV4E, there's *something* clobbering the 16KiB just after ++ * where we setup these fixed tables. No idea what it is just yet, ++ * so reserve this space on all NV4X cards for now. 
++ */ ++ if (dev_priv->card_type >= NV_40) ++ offset += 16*1024; ++ ++ ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, ++ offset, dev_priv->ramin_rsvd_vram - offset); ++ if (ret) { ++ dev_priv->ramin_heap = NULL; ++ DRM_ERROR("Failed to init RAMIN heap\n"); ++ } ++ ++ return ret; ++} ++ ++void ++nv04_instmem_takedown(struct drm_device *dev) ++{ ++} ++ ++int ++nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) ++{ ++ if (gpuobj->im_backing) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++void ++nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (gpuobj && gpuobj->im_backing) { ++ if (gpuobj->im_bound) ++ dev_priv->Engine.instmem.unbind(dev, gpuobj); ++ gpuobj->im_backing = NULL; ++ } ++} ++ ++int ++nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ if (!gpuobj->im_pramin || gpuobj->im_bound) ++ return -EINVAL; ++ ++ gpuobj->im_bound = 1; ++ return 0; ++} ++ ++int ++nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ if (gpuobj->im_bound == 0) ++ return -EINVAL; ++ ++ gpuobj->im_bound = 0; ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c +new file mode 100644 +index 0000000..24c1f7b +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv04_mc.c +@@ -0,0 +1,22 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv04_mc_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Power up everything, resetting each individual unit will ++ * be done later if needed. 
++ */ ++ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ return 0; ++} ++ ++void ++nv04_mc_takedown(struct drm_device *dev) ++{ ++} +diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c +new file mode 100644 +index 0000000..616f197 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv04_timer.c +@@ -0,0 +1,53 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv04_timer_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PTIMER_INTR_EN_0, 0x00000000); ++ NV_WRITE(NV04_PTIMER_INTR_0, 0xFFFFFFFF); ++ ++ /* Just use the pre-existing values when possible for now; these regs ++ * are not written in nv (driver writer missed a /4 on the address), and ++ * writing 8 and 3 to the correct regs breaks the timings on the LVDS ++ * hardware sequencing microcode. ++ * A correct solution (involving calculations with the GPU PLL) can ++ * be done when kernel modesetting lands ++ */ ++ if (!NV_READ(NV04_PTIMER_NUMERATOR) || !NV_READ(NV04_PTIMER_DENOMINATOR)) { ++ NV_WRITE(NV04_PTIMER_NUMERATOR, 0x00000008); ++ NV_WRITE(NV04_PTIMER_DENOMINATOR, 0x00000003); ++ } ++ ++ return 0; ++} ++ ++uint64_t ++nv04_timer_read(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t low; ++ /* From kmmio dumps on nv28 this looks like how the blob does this. ++ * It reads the high dword twice, before and after. ++ * The only explanation seems to be that the 64-bit timer counter ++ * advances between high and low dword reads and may corrupt the ++ * result. Not confirmed. 
++ */ ++ uint32_t high2 = NV_READ(NV04_PTIMER_TIME_1); ++ uint32_t high1; ++ do { ++ high1 = high2; ++ low = NV_READ(NV04_PTIMER_TIME_0); ++ high2 = NV_READ(NV04_PTIMER_TIME_1); ++ } while(high1 != high2); ++ return (((uint64_t)high2) << 32) | (uint64_t)low; ++} ++ ++void ++nv04_timer_takedown(struct drm_device *dev) ++{ ++} +diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c +new file mode 100644 +index 0000000..6e0773a +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv10_fb.c +@@ -0,0 +1,25 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv10_fb_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t fb_bar_size; ++ int i; ++ ++ fb_bar_size = drm_get_resource_len(dev, 0) - 1; ++ for (i=0; iramfc->gpuobj, \ ++ NV10_RAMFC_##offset/4, (val)) ++#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ ++ NV10_RAMFC_##offset/4) ++#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE)) ++#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 
64 : 32) ++ ++int ++nv10_fifo_channel_id(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & ++ NV10_PFIFO_CACHE1_PUSH1_CHID_MASK); ++} ++ ++int ++nv10_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, ++ NV10_RAMFC__SIZE, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ NULL, &chan->ramfc))) ++ return ret; ++ ++ /* Fill entries that are seen filled in dumps of nvidia driver just ++ * after channel's is put into DMA mode ++ */ ++ RAMFC_WR(DMA_PUT , chan->pushbuf_base); ++ RAMFC_WR(DMA_GET , chan->pushbuf_base); ++ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4); ++ RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0); ++ ++ /* enable the fifo dma operation */ ++ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<id)); ++ return 0; ++} ++ ++void ++nv10_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<id)); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv10_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, ++ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); ++ ++ tmp = RAMFC_RD(DMA_INSTANCE); ++ 
NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , tmp & 0xFFFF); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , tmp >> 16); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , RAMFC_RD(DMA_FETCH)); ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); ++ ++ if (dev_priv->chipset >= 0x17) { ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE, ++ RAMFC_RD(ACQUIRE_VALUE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, ++ RAMFC_RD(ACQUIRE_TIMESTAMP)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, ++ RAMFC_RD(ACQUIRE_TIMEOUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE, ++ RAMFC_RD(SEMAPHORE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE, ++ RAMFC_RD(DMA_SUBROUTINE)); ++ } ++ ++ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); ++ ++ return 0; ++} ++ ++int ++nv10_fifo_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); ++ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF; ++ tmp |= (NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16); ++ RAMFC_WR(DMA_INSTANCE , tmp); ++ ++ RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); ++ RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); ++ RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); ++ RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); ++ ++ if (dev_priv->chipset >= 0x17) { ++ RAMFC_WR(ACQUIRE_VALUE, ++ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); ++ RAMFC_WR(ACQUIRE_TIMESTAMP, ++ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); ++ RAMFC_WR(ACQUIRE_TIMEOUT, ++ 
NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); ++ RAMFC_WR(SEMAPHORE, ++ NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); ++ RAMFC_WR(DMA_SUBROUTINE, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ } ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c +new file mode 100644 +index 0000000..9bf6c7e +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv10_graph.c +@@ -0,0 +1,871 @@ ++/* ++ * Copyright 2007 Matthieu CASTET ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++ ++#define NV10_FIFO_NUMBER 32 ++ ++struct pipe_state { ++ uint32_t pipe_0x0000[0x040/4]; ++ uint32_t pipe_0x0040[0x010/4]; ++ uint32_t pipe_0x0200[0x0c0/4]; ++ uint32_t pipe_0x4400[0x080/4]; ++ uint32_t pipe_0x6400[0x3b0/4]; ++ uint32_t pipe_0x6800[0x2f0/4]; ++ uint32_t pipe_0x6c00[0x030/4]; ++ uint32_t pipe_0x7000[0x130/4]; ++ uint32_t pipe_0x7400[0x0c0/4]; ++ uint32_t pipe_0x7800[0x0c0/4]; ++}; ++ ++static int nv10_graph_ctx_regs [] = { ++NV10_PGRAPH_CTX_SWITCH1, ++NV10_PGRAPH_CTX_SWITCH2, ++NV10_PGRAPH_CTX_SWITCH3, ++NV10_PGRAPH_CTX_SWITCH4, ++NV10_PGRAPH_CTX_SWITCH5, ++NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */ ++NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */ ++NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */ ++NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */ ++NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */ ++0x00400164, ++0x00400184, ++0x004001a4, ++0x004001c4, ++0x004001e4, ++0x00400168, ++0x00400188, ++0x004001a8, ++0x004001c8, ++0x004001e8, ++0x0040016c, ++0x0040018c, ++0x004001ac, ++0x004001cc, ++0x004001ec, ++0x00400170, ++0x00400190, ++0x004001b0, ++0x004001d0, ++0x004001f0, ++0x00400174, ++0x00400194, ++0x004001b4, ++0x004001d4, ++0x004001f4, ++0x00400178, ++0x00400198, ++0x004001b8, ++0x004001d8, ++0x004001f8, ++0x0040017c, ++0x0040019c, ++0x004001bc, ++0x004001dc, ++0x004001fc, ++NV10_PGRAPH_CTX_USER, ++NV04_PGRAPH_DMA_START_0, ++NV04_PGRAPH_DMA_START_1, ++NV04_PGRAPH_DMA_LENGTH, ++NV04_PGRAPH_DMA_MISC, ++NV10_PGRAPH_DMA_PITCH, ++NV04_PGRAPH_BOFFSET0, ++NV04_PGRAPH_BBASE0, ++NV04_PGRAPH_BLIMIT0, ++NV04_PGRAPH_BOFFSET1, ++NV04_PGRAPH_BBASE1, ++NV04_PGRAPH_BLIMIT1, ++NV04_PGRAPH_BOFFSET2, ++NV04_PGRAPH_BBASE2, ++NV04_PGRAPH_BLIMIT2, ++NV04_PGRAPH_BOFFSET3, ++NV04_PGRAPH_BBASE3, ++NV04_PGRAPH_BLIMIT3, ++NV04_PGRAPH_BOFFSET4, ++NV04_PGRAPH_BBASE4, 
++NV04_PGRAPH_BLIMIT4, ++NV04_PGRAPH_BOFFSET5, ++NV04_PGRAPH_BBASE5, ++NV04_PGRAPH_BLIMIT5, ++NV04_PGRAPH_BPITCH0, ++NV04_PGRAPH_BPITCH1, ++NV04_PGRAPH_BPITCH2, ++NV04_PGRAPH_BPITCH3, ++NV04_PGRAPH_BPITCH4, ++NV10_PGRAPH_SURFACE, ++NV10_PGRAPH_STATE, ++NV04_PGRAPH_BSWIZZLE2, ++NV04_PGRAPH_BSWIZZLE5, ++NV04_PGRAPH_BPIXEL, ++NV10_PGRAPH_NOTIFY, ++NV04_PGRAPH_PATT_COLOR0, ++NV04_PGRAPH_PATT_COLOR1, ++NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */ ++0x00400904, ++0x00400908, ++0x0040090c, ++0x00400910, ++0x00400914, ++0x00400918, ++0x0040091c, ++0x00400920, ++0x00400924, ++0x00400928, ++0x0040092c, ++0x00400930, ++0x00400934, ++0x00400938, ++0x0040093c, ++0x00400940, ++0x00400944, ++0x00400948, ++0x0040094c, ++0x00400950, ++0x00400954, ++0x00400958, ++0x0040095c, ++0x00400960, ++0x00400964, ++0x00400968, ++0x0040096c, ++0x00400970, ++0x00400974, ++0x00400978, ++0x0040097c, ++0x00400980, ++0x00400984, ++0x00400988, ++0x0040098c, ++0x00400990, ++0x00400994, ++0x00400998, ++0x0040099c, ++0x004009a0, ++0x004009a4, ++0x004009a8, ++0x004009ac, ++0x004009b0, ++0x004009b4, ++0x004009b8, ++0x004009bc, ++0x004009c0, ++0x004009c4, ++0x004009c8, ++0x004009cc, ++0x004009d0, ++0x004009d4, ++0x004009d8, ++0x004009dc, ++0x004009e0, ++0x004009e4, ++0x004009e8, ++0x004009ec, ++0x004009f0, ++0x004009f4, ++0x004009f8, ++0x004009fc, ++NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */ ++0x0040080c, ++NV04_PGRAPH_PATTERN_SHAPE, ++NV03_PGRAPH_MONO_COLOR0, ++NV04_PGRAPH_ROP3, ++NV04_PGRAPH_CHROMA, ++NV04_PGRAPH_BETA_AND, ++NV04_PGRAPH_BETA_PREMULT, ++0x00400e70, ++0x00400e74, ++0x00400e78, ++0x00400e7c, ++0x00400e80, ++0x00400e84, ++0x00400e88, ++0x00400e8c, ++0x00400ea0, ++0x00400ea4, ++0x00400ea8, ++0x00400e90, ++0x00400e94, ++0x00400e98, ++0x00400e9c, ++NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00 to 0x400f1c */ ++NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20 to 0x400f3c */ ++0x00400f04, ++0x00400f24, ++0x00400f08, ++0x00400f28, 
++0x00400f0c, ++0x00400f2c, ++0x00400f10, ++0x00400f30, ++0x00400f14, ++0x00400f34, ++0x00400f18, ++0x00400f38, ++0x00400f1c, ++0x00400f3c, ++NV10_PGRAPH_XFMODE0, ++NV10_PGRAPH_XFMODE1, ++NV10_PGRAPH_GLOBALSTATE0, ++NV10_PGRAPH_GLOBALSTATE1, ++NV04_PGRAPH_STORED_FMT, ++NV04_PGRAPH_SOURCE_COLOR, ++NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */ ++NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */ ++0x00400404, ++0x00400484, ++0x00400408, ++0x00400488, ++0x0040040c, ++0x0040048c, ++0x00400410, ++0x00400490, ++0x00400414, ++0x00400494, ++0x00400418, ++0x00400498, ++0x0040041c, ++0x0040049c, ++0x00400420, ++0x004004a0, ++0x00400424, ++0x004004a4, ++0x00400428, ++0x004004a8, ++0x0040042c, ++0x004004ac, ++0x00400430, ++0x004004b0, ++0x00400434, ++0x004004b4, ++0x00400438, ++0x004004b8, ++0x0040043c, ++0x004004bc, ++0x00400440, ++0x004004c0, ++0x00400444, ++0x004004c4, ++0x00400448, ++0x004004c8, ++0x0040044c, ++0x004004cc, ++0x00400450, ++0x004004d0, ++0x00400454, ++0x004004d4, ++0x00400458, ++0x004004d8, ++0x0040045c, ++0x004004dc, ++0x00400460, ++0x004004e0, ++0x00400464, ++0x004004e4, ++0x00400468, ++0x004004e8, ++0x0040046c, ++0x004004ec, ++0x00400470, ++0x004004f0, ++0x00400474, ++0x004004f4, ++0x00400478, ++0x004004f8, ++0x0040047c, ++0x004004fc, ++NV03_PGRAPH_ABS_UCLIP_XMIN, ++NV03_PGRAPH_ABS_UCLIP_XMAX, ++NV03_PGRAPH_ABS_UCLIP_YMIN, ++NV03_PGRAPH_ABS_UCLIP_YMAX, ++0x00400550, ++0x00400558, ++0x00400554, ++0x0040055c, ++NV03_PGRAPH_ABS_UCLIPA_XMIN, ++NV03_PGRAPH_ABS_UCLIPA_XMAX, ++NV03_PGRAPH_ABS_UCLIPA_YMIN, ++NV03_PGRAPH_ABS_UCLIPA_YMAX, ++NV03_PGRAPH_ABS_ICLIP_XMAX, ++NV03_PGRAPH_ABS_ICLIP_YMAX, ++NV03_PGRAPH_XY_LOGIC_MISC0, ++NV03_PGRAPH_XY_LOGIC_MISC1, ++NV03_PGRAPH_XY_LOGIC_MISC2, ++NV03_PGRAPH_XY_LOGIC_MISC3, ++NV03_PGRAPH_CLIPX_0, ++NV03_PGRAPH_CLIPX_1, ++NV03_PGRAPH_CLIPY_0, ++NV03_PGRAPH_CLIPY_1, ++NV10_PGRAPH_COMBINER0_IN_ALPHA, ++NV10_PGRAPH_COMBINER1_IN_ALPHA, ++NV10_PGRAPH_COMBINER0_IN_RGB, 
++NV10_PGRAPH_COMBINER1_IN_RGB, ++NV10_PGRAPH_COMBINER_COLOR0, ++NV10_PGRAPH_COMBINER_COLOR1, ++NV10_PGRAPH_COMBINER0_OUT_ALPHA, ++NV10_PGRAPH_COMBINER1_OUT_ALPHA, ++NV10_PGRAPH_COMBINER0_OUT_RGB, ++NV10_PGRAPH_COMBINER1_OUT_RGB, ++NV10_PGRAPH_COMBINER_FINAL0, ++NV10_PGRAPH_COMBINER_FINAL1, ++0x00400e00, ++0x00400e04, ++0x00400e08, ++0x00400e0c, ++0x00400e10, ++0x00400e14, ++0x00400e18, ++0x00400e1c, ++0x00400e20, ++0x00400e24, ++0x00400e28, ++0x00400e2c, ++0x00400e30, ++0x00400e34, ++0x00400e38, ++0x00400e3c, ++NV04_PGRAPH_PASSTHRU_0, ++NV04_PGRAPH_PASSTHRU_1, ++NV04_PGRAPH_PASSTHRU_2, ++NV10_PGRAPH_DIMX_TEXTURE, ++NV10_PGRAPH_WDIMX_TEXTURE, ++NV10_PGRAPH_DVD_COLORFMT, ++NV10_PGRAPH_SCALED_FORMAT, ++NV04_PGRAPH_MISC24_0, ++NV04_PGRAPH_MISC24_1, ++NV04_PGRAPH_MISC24_2, ++NV03_PGRAPH_X_MISC, ++NV03_PGRAPH_Y_MISC, ++NV04_PGRAPH_VALID1, ++NV04_PGRAPH_VALID2, ++}; ++ ++static int nv17_graph_ctx_regs [] = { ++NV10_PGRAPH_DEBUG_4, ++0x004006b0, ++0x00400eac, ++0x00400eb0, ++0x00400eb4, ++0x00400eb8, ++0x00400ebc, ++0x00400ec0, ++0x00400ec4, ++0x00400ec8, ++0x00400ecc, ++0x00400ed0, ++0x00400ed4, ++0x00400ed8, ++0x00400edc, ++0x00400ee0, ++0x00400a00, ++0x00400a04, ++}; ++ ++struct graph_state { ++ int nv10[sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0])]; ++ int nv17[sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0])]; ++ struct pipe_state pipe_state; ++}; ++ ++static void nv10_graph_save_pipe(struct nouveau_channel *chan) { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; ++ int i; ++#define PIPE_SAVE(addr) \ ++ do { \ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ ++ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ ++ fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \ ++ } while (0) ++ ++ PIPE_SAVE(0x4400); ++ 
PIPE_SAVE(0x0200); ++ PIPE_SAVE(0x6400); ++ PIPE_SAVE(0x6800); ++ PIPE_SAVE(0x6c00); ++ PIPE_SAVE(0x7000); ++ PIPE_SAVE(0x7400); ++ PIPE_SAVE(0x7800); ++ PIPE_SAVE(0x0040); ++ PIPE_SAVE(0x0000); ++ ++#undef PIPE_SAVE ++} ++ ++static void nv10_graph_load_pipe(struct nouveau_channel *chan) { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; ++ int i; ++ uint32_t xfmode0, xfmode1; ++#define PIPE_RESTORE(addr) \ ++ do { \ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ ++ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \ ++ } while (0) ++ ++ ++ nouveau_wait_for_idle(dev); ++ /* XXX check haiku comments */ ++ xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0); ++ xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1); ++ NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000); ++ NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000); ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); ++ for (i = 0; i < 4; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); ++ for (i = 0; i < 4; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); ++ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); ++ for (i = 0; i < 3; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); ++ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); ++ for (i = 0; i < 3; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); ++ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008); ++ ++ ++ PIPE_RESTORE(0x0200); ++ nouveau_wait_for_idle(dev); ++ ++ /* restore XFMODE */ ++ NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0); ++ NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1); ++ PIPE_RESTORE(0x6400); ++ PIPE_RESTORE(0x6800); ++ PIPE_RESTORE(0x6c00); ++ PIPE_RESTORE(0x7000); ++ PIPE_RESTORE(0x7400); ++ PIPE_RESTORE(0x7800); ++ PIPE_RESTORE(0x4400); ++ 
PIPE_RESTORE(0x0000); ++ PIPE_RESTORE(0x0040); ++ nouveau_wait_for_idle(dev); ++ ++#undef PIPE_RESTORE ++} ++ ++static void nv10_graph_create_pipe(struct nouveau_channel *chan) { ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; ++ uint32_t *fifo_pipe_state_addr; ++ int i; ++#define PIPE_INIT(addr) \ ++ do { \ ++ fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \ ++ } while (0) ++#define PIPE_INIT_END(addr) \ ++ do { \ ++ if (fifo_pipe_state_addr != \ ++ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \ ++ DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, fifo_pipe_state_addr, \ ++ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \ ++ } while (0) ++#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value ++ ++ PIPE_INIT(0x0200); ++ for (i = 0; i < 48; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x0200); ++ ++ PIPE_INIT(0x6400); ++ for (i = 0; i < 211; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f000000); ++ NV_WRITE_PIPE_INIT(0x3f000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ 
NV_WRITE_PIPE_INIT(0x3f800000); ++ PIPE_INIT_END(0x6400); ++ ++ PIPE_INIT(0x6800); ++ for (i = 0; i < 162; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ for (i = 0; i < 25; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x6800); ++ ++ PIPE_INIT(0x6c00); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0xbf800000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x6c00); ++ ++ PIPE_INIT(0x7000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ 
NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ for (i = 0; i < 35; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x7000); ++ ++ PIPE_INIT(0x7400); ++ for (i = 0; i < 48; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x7400); ++ ++ PIPE_INIT(0x7800); ++ for (i = 0; i < 48; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x7800); ++ ++ PIPE_INIT(0x4400); ++ for (i = 0; i < 32; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x4400); ++ ++ PIPE_INIT(0x0000); ++ for (i = 0; i < 16; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x0000); ++ ++ PIPE_INIT(0x0040); ++ for (i = 0; i < 4; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x0040); ++ ++#undef PIPE_INIT ++#undef PIPE_INIT_END ++#undef NV_WRITE_PIPE_INIT ++} ++ ++static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) ++{ ++ int i; ++ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) { ++ if (nv10_graph_ctx_regs[i] == reg) ++ return i; ++ } ++ DRM_ERROR("unknow offset nv10_ctx_regs %d\n", reg); ++ return -1; ++} ++ ++static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) ++{ ++ int i; ++ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) { ++ if (nv17_graph_ctx_regs[i] == reg) ++ return i; ++ } ++ DRM_ERROR("unknow offset nv17_ctx_regs %d\n", reg); ++ return -1; ++} ++ ++int nv10_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) ++ NV_WRITE(nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]); ++ if (dev_priv->chipset>=0x17) { ++ for (i = 0; i < 
sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) ++ NV_WRITE(nv17_graph_ctx_regs[i], pgraph_ctx->nv17[i]); ++ } ++ ++ nv10_graph_load_pipe(chan); ++ ++ return 0; ++} ++ ++int nv10_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) ++ pgraph_ctx->nv10[i] = NV_READ(nv10_graph_ctx_regs[i]); ++ if (dev_priv->chipset>=0x17) { ++ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) ++ pgraph_ctx->nv17[i] = NV_READ(nv17_graph_ctx_regs[i]); ++ } ++ ++ nv10_graph_save_pipe(chan); ++ ++ return 0; ++} ++ ++void nouveau_nv10_context_switch(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv; ++ struct nouveau_engine *engine; ++ struct nouveau_channel *next, *last; ++ int chid; ++ ++ if (!dev) { ++ DRM_DEBUG("Invalid drm_device\n"); ++ return; ++ } ++ dev_priv = dev->dev_private; ++ if (!dev_priv) { ++ DRM_DEBUG("Invalid drm_nouveau_private\n"); ++ return; ++ } ++ if (!dev_priv->fifos) { ++ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n"); ++ return; ++ } ++ engine = &dev_priv->Engine; ++ ++ chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & ++ (engine->fifo.channels - 1); ++ next = dev_priv->fifos[chid]; ++ ++ if (!next) { ++ DRM_ERROR("Invalid next channel\n"); ++ return; ++ } ++ ++ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & ++ (engine->fifo.channels - 1); ++ last = dev_priv->fifos[chid]; ++ ++ if (!last) { ++ DRM_INFO("WARNING: Invalid last channel, switch to %x\n", ++ next->id); ++ } else { ++ DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n", ++ last->id, next->id); ++ } ++ ++ NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ if (last) { ++ nouveau_wait_for_idle(dev); ++ nv10_graph_save_context(last); ++ } ++ ++ nouveau_wait_for_idle(dev); ++ ++ 
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); ++ ++ nouveau_wait_for_idle(dev); ++ ++ nv10_graph_load_context(next); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); ++ NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_FIFO,0x1); ++} ++ ++#define NV_WRITE_CTX(reg, val) do { \ ++ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \ ++ if (offset > 0) \ ++ pgraph_ctx->nv10[offset] = val; \ ++ } while (0) ++ ++#define NV17_WRITE_CTX(reg, val) do { \ ++ int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \ ++ if (offset > 0) \ ++ pgraph_ctx->nv17[offset] = val; \ ++ } while (0) ++ ++int nv10_graph_create_context(struct nouveau_channel *chan) { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx; ++ ++ DRM_DEBUG("nv10_graph_context_create %d\n", chan->id); ++ ++ chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx), ++ DRM_MEM_DRIVER); ++ ++ if (pgraph_ctx == NULL) ++ return -ENOMEM; ++ ++ /* mmio trace suggest that should be done in ddx with methods/objects */ ++ ++ NV_WRITE_CTX(0x00400e88, 0x08000000); ++ NV_WRITE_CTX(0x00400e9c, 0x4b7fffff); ++ NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff); ++ NV_WRITE_CTX(0x00400e10, 0x00001000); ++ NV_WRITE_CTX(0x00400e14, 0x00001000); ++ NV_WRITE_CTX(0x00400e30, 0x00080008); ++ NV_WRITE_CTX(0x00400e34, 0x00080008); ++ if (dev_priv->chipset>=0x17) { ++ /* is it really needed ??? 
*/ ++ NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4)); ++ NV17_WRITE_CTX(0x004006b0, NV_READ(0x004006b0)); ++ NV17_WRITE_CTX(0x00400eac, 0x0fff0000); ++ NV17_WRITE_CTX(0x00400eb0, 0x0fff0000); ++ NV17_WRITE_CTX(0x00400ec0, 0x00000080); ++ NV17_WRITE_CTX(0x00400ed0, 0x00000080); ++ } ++ NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24); ++ ++ nv10_graph_create_pipe(chan); ++ return 0; ++} ++ ++void nv10_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int chid; ++ ++ drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER); ++ chan->pgraph_ctx = NULL; ++ ++ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1); ++ ++ /* This code seems to corrupt the 3D pipe, but blob seems to do similar things ???? ++ */ ++ if (chid == chan->id) { ++ DRM_INFO("cleanning a channel with graph in current context\n"); ++ } ++} ++ ++int nv10_graph_init(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); ++ //NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */ ++ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 | ++ (1<<29) | ++ (1<<31)); ++ if (dev_priv->chipset>=0x17) { ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000); ++ NV_WRITE(0x004006b0, 0x40000020); ++ } ++ else ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000); ++ ++ /* copy tile info from PFB */ ++ 
for (i=0; idev_private; ++ int i; ++/* ++write32 #1 block at +0x00740adc NV_PRAMIN+0x40adc of 3369 (0xd29) elements: +++0x00740adc: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b3c: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000 +++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740bbc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740bdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740bfc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++ +++0x00740c1c: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000 +++0x00740c3c: 00000000 00000000 00000000 44400000 00000000 00000000 00000000 00000000 +++0x00740c5c: 00000000 00000000 00000000 00000000 00000000 00000000 00030303 00030303 +++0x00740c7c: 00030303 00030303 00000000 00000000 00000000 00000000 00080000 00080000 +++0x00740c9c: 00080000 00080000 00000000 00000000 01012000 01012000 01012000 01012000 +++0x00740cbc: 000105b8 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 +++0x00740cdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740cfc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 +++0x00740d1c: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 +++0x00740d3c: 00000000 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 ++ +++0x00740d5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d7c: 00000000 00000000 00000000 
00000000 00000000 00000000 00000000 00000000 +++0x00740d9c: 00000001 00000000 00004000 00000000 00000000 00000001 00000000 00040000 +++0x00740dbc: 00010000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740ddc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++... ++*/ ++ INSTANCE_WR(ctx, (0x33c/4)+0, 0xffff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+25, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+26, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+80, 0x00000101); ++ INSTANCE_WR(ctx, (0x33c/4)+85, 0x00000111); ++ INSTANCE_WR(ctx, (0x33c/4)+91, 0x44400000); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+102+i, 0x00030303); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+110+i, 0x00080000); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+116+i, 0x01012000); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+120+i, 0x000105b8); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008); ++ for (i = 0; i < 16; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7fffff); ++ INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001); ++ INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000); ++ INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001); ++ INSTANCE_WR(ctx, (0x33c/4)+183, 0x00040000); ++ INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000); ++ ++/* ++... +++0x0074239c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x007423bc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x007423dc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x007423fc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++... 
+++0x00742bdc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742bfc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742c1c: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742c3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++... ++*/ ++ for (i = 0; i < 0x880; i += 0x10) { ++ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+0, 0x10700ff9); ++ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+1, 0x0436086c); ++ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+2, 0x000c001b); ++ } ++ ++/* ++write32 #1 block at +0x00742fbc NV_PRAMIN+0x42fbc of 4 (0x4) elements: +++0x00742fbc: 3f800000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x281c/4), 0x3f800000); ++ ++/* ++write32 #1 block at +0x00742ffc NV_PRAMIN+0x42ffc of 12 (0xc) elements: +++0x00742ffc: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000 +++0x0074301c: 00000000 bf800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x285c/4)+0, 0x40000000); ++ INSTANCE_WR(ctx, (0x285c/4)+1, 0x3f800000); ++ INSTANCE_WR(ctx, (0x285c/4)+2, 0x3f000000); ++ INSTANCE_WR(ctx, (0x285c/4)+4, 0x40000000); ++ INSTANCE_WR(ctx, (0x285c/4)+5, 0x3f800000); ++ INSTANCE_WR(ctx, (0x285c/4)+7, 0xbf800000); ++ INSTANCE_WR(ctx, (0x285c/4)+9, 0xbf800000); ++ ++/* ++write32 #1 block at +0x00742fcc NV_PRAMIN+0x42fcc of 4 (0x4) elements: +++0x00742fcc: 00000000 3f800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x282c/4)+1, 0x3f800000); ++ ++/* ++write32 #1 block at +0x0074302c NV_PRAMIN+0x4302c of 4 (0x4) elements: +++0x0074302c: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743c9c NV_PRAMIN+0x43c9c of 4 (0x4) elements: +++0x00743c9c: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743c3c NV_PRAMIN+0x43c3c of 8 (0x8) elements: +++0x00743c3c: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x349c/4)+2, 0x000fe000); ++ ++/* ++write32 #1 block at +0x00743c6c 
NV_PRAMIN+0x43c6c of 4 (0x4) elements: +++0x00743c6c: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743ccc NV_PRAMIN+0x43ccc of 4 (0x4) elements: +++0x00743ccc: 00000000 000003f8 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x352c/4)+1, 0x000003f8); ++ ++/* write32 #1 NV_PRAMIN+0x43ce0 <- 0x002fe000 */ ++ INSTANCE_WR(ctx, 0x3540/4, 0x002fe000); ++ ++/* ++write32 #1 block at +0x00743cfc NV_PRAMIN+0x43cfc of 8 (0x8) elements: +++0x00743cfc: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c ++*/ ++ for (i = 0; i < 8; ++i) ++ INSTANCE_WR(ctx, (0x355c/4)+i, 0x001c527c); ++} ++ ++static void nv2a_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x33c/4, 0xffff0000); ++ for(i = 0x3a0; i< 0x3a8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x47c/4, 0x00000101); ++ INSTANCE_WR(ctx, 0x490/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x4a8/4, 0x44400000); ++ for(i = 0x4d4; i< 0x4e4; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x4f4; i< 0x504; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080000); ++ for(i = 0x50c; i< 0x51c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x51c; i< 0x52c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x000105b8); ++ for(i = 0x52c; i< 0x53c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for(i = 0x55c; i< 0x59c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x5a4/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x5fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x604/4, 0x00004000); ++ INSTANCE_WR(ctx, 0x610/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x618/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x61c/4, 0x00010000); ++ ++ for (i=0x1a9c; i <= 0x22fc/4; i += 32) { ++ INSTANCE_WR(ctx, i/4 , 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ ++ INSTANCE_WR(ctx, 0x269c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26b0/4, 0x3f800000); ++ 
INSTANCE_WR(ctx, 0x26dc/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x26e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26e4/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x26ec/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x26f0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26f8/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x2700/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x3024/4, 0x000fe000); ++ INSTANCE_WR(ctx, 0x30a0/4, 0x000003f8); ++ INSTANCE_WR(ctx, 0x33fc/4, 0x002fe000); ++ for(i = 0x341c; i< 0x343c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x001c527c); ++} ++ ++static void nv25_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++/* ++write32 #1 block at +0x00740a7c NV_PRAMIN.GRCTX0+0x35c of 173 (0xad) elements: +++0x00740a7c: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740a9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740abc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740adc: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000 +++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++ +++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740bbc: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000 +++0x00740bdc: 00000000 00000000 00000000 00000080 ffff0000 00000001 00000000 00000000 +++0x00740bfc: 00000000 00000000 44400000 00000000 00000000 00000000 00000000 00000000 +++0x00740c1c: 4b800000 00000000 00000000 00000000 00000000 00030303 00030303 00030303 +++0x00740c3c: 
00030303 00000000 00000000 00000000 00000000 00080000 00080000 00080000 +++0x00740c5c: 00080000 00000000 00000000 01012000 01012000 01012000 01012000 000105b8 ++ +++0x00740c7c: 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 00000000 +++0x00740c9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 07ff0000 +++0x00740cbc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 +++0x00740cdc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 00000000 +++0x00740cfc: 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d1c: 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x35c/4)+0, 0xffff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+25, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+26, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+80, 0x00000101); ++ INSTANCE_WR(ctx, (0x35c/4)+85, 0x00000111); ++ INSTANCE_WR(ctx, (0x35c/4)+91, 0x00000080); ++ INSTANCE_WR(ctx, (0x35c/4)+92, 0xffff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+93, 0x00000001); ++ INSTANCE_WR(ctx, (0x35c/4)+98, 0x44400000); ++ INSTANCE_WR(ctx, (0x35c/4)+104, 0x4b800000); ++ INSTANCE_WR(ctx, (0x35c/4)+109, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+110, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+111, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+112, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+117, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+118, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+119, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+120, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+123, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+124, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+125, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+126, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+127, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+128, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+129, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+130, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+131, 0x00080008); ++ INSTANCE_WR(ctx, (0x35c/4)+132, 0x00080008); ++ 
INSTANCE_WR(ctx, (0x35c/4)+133, 0x00080008); ++ INSTANCE_WR(ctx, (0x35c/4)+134, 0x00080008); ++ for (i=0; i<16; ++i) ++ INSTANCE_WR(ctx, (0x35c/4)+143+i, 0x07ff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7fffff); ++ ++/* ++write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements: +++0x00740d34: 00000000 00000000 00000000 00000080 30201000 70605040 b0a09080 f0e0d0c0 +++0x00740d54: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d74: 00000000 00000000 00000000 00000000 00000001 00000000 00004000 00000000 +++0x00740d94: 00000000 00000001 00000000 00040000 00010000 00000000 00000000 00000000 +++0x00740db4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++... +++0x00742214: 00000000 00000000 00000000 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742234: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742254: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742274: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++... 
+++0x00742a34: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742a54: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742a74: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742a94: 10700ff9 0436086c 000c001b 00000000 00000000 00000000 00000000 00000000 +++0x00742ab4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00742ad4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x614/4)+3, 0x00000080); ++ INSTANCE_WR(ctx, (0x614/4)+4, 0x30201000); ++ INSTANCE_WR(ctx, (0x614/4)+5, 0x70605040); ++ INSTANCE_WR(ctx, (0x614/4)+6, 0xb0a09080); ++ INSTANCE_WR(ctx, (0x614/4)+7, 0xf0e0d0c0); ++ INSTANCE_WR(ctx, (0x614/4)+20, 0x00000001); ++ INSTANCE_WR(ctx, (0x614/4)+22, 0x00004000); ++ INSTANCE_WR(ctx, (0x614/4)+25, 0x00000001); ++ INSTANCE_WR(ctx, (0x614/4)+27, 0x00040000); ++ INSTANCE_WR(ctx, (0x614/4)+28, 0x00010000); ++ for (i=0; i < 0x880/4; i+=4) { ++ INSTANCE_WR(ctx, (0x1b04/4)+i+0, 0x10700ff9); ++ INSTANCE_WR(ctx, (0x1b04/4)+i+1, 0x0436086c); ++ INSTANCE_WR(ctx, (0x1b04/4)+i+2, 0x000c001b); ++ } ++ ++/* ++write32 #1 block at +0x00742e24 NV_PRAMIN.GRCTX0+0x2704 of 4 (0x4) elements: +++0x00742e24: 3f800000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x2704/4), 0x3f800000); ++ ++/* ++write32 #1 block at +0x00742e64 NV_PRAMIN.GRCTX0+0x2744 of 12 (0xc) elements: +++0x00742e64: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000 +++0x00742e84: 00000000 bf800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x2744/4)+0, 0x40000000); ++ INSTANCE_WR(ctx, (0x2744/4)+1, 0x3f800000); ++ INSTANCE_WR(ctx, (0x2744/4)+2, 0x3f000000); ++ INSTANCE_WR(ctx, (0x2744/4)+4, 0x40000000); ++ INSTANCE_WR(ctx, (0x2744/4)+5, 0x3f800000); ++ INSTANCE_WR(ctx, (0x2744/4)+7, 0xbf800000); ++ INSTANCE_WR(ctx, (0x2744/4)+9, 0xbf800000); ++ ++/* ++write32 #1 block at +0x00742e34 NV_PRAMIN.GRCTX0+0x2714 of 4 
(0x4) elements: +++0x00742e34: 00000000 3f800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x2714/4)+1, 0x3f800000); ++ ++/* ++write32 #1 block at +0x00742e94 NV_PRAMIN.GRCTX0+0x2774 of 4 (0x4) elements: +++0x00742e94: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743804 NV_PRAMIN.GRCTX0+0x30e4 of 4 (0x4) elements: +++0x00743804: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x007437a4 NV_PRAMIN.GRCTX0+0x3084 of 8 (0x8) elements: +++0x007437a4: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x3084/4)+2, 0x000fe000); ++ ++/* ++write32 #1 block at +0x007437d4 NV_PRAMIN.GRCTX0+0x30b4 of 4 (0x4) elements: +++0x007437d4: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743824 NV_PRAMIN.GRCTX0+0x3104 of 4 (0x4) elements: +++0x00743824: 00000000 000003f8 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x3104/4)+1, 0x000003f8); ++ ++/* write32 #1 NV_PRAMIN.GRCTX0+0x3468 <- 0x002fe000 */ ++ INSTANCE_WR(ctx, 0x3468/4, 0x002fe000); ++ ++/* ++write32 #1 block at +0x00743ba4 NV_PRAMIN.GRCTX0+0x3484 of 8 (0x8) elements: +++0x00743ba4: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c ++*/ ++ for (i=0; i<8; ++i) ++ INSTANCE_WR(ctx, (0x3484/4)+i, 0x001c527c); ++} ++ ++static void nv30_31_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x410/4, 0x00000101); ++ INSTANCE_WR(ctx, 0x424/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x428/4, 0x00000060); ++ INSTANCE_WR(ctx, 0x444/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x448/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x44c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x460/4, 0x44400000); ++ INSTANCE_WR(ctx, 0x48c/4, 0xffff0000); ++ for(i = 0x4e0; i< 0x4e8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x4ec/4, 0x00011100); ++ for(i = 0x508; i< 0x548; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ 
INSTANCE_WR(ctx, 0x550/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x58c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x590/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x594/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x598/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x59c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x5b0/4, 0xb0000000); ++ for(i = 0x600; i< 0x640; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00010588); ++ for(i = 0x640; i< 0x680; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x6c0; i< 0x700; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0008aae4); ++ for(i = 0x700; i< 0x740; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x740; i< 0x780; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x85c/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x860/4, 0x00010000); ++ for(i = 0x864; i< 0x874; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00040004); ++ for(i = 0x1f18; i<= 0x3088 ; i+= 16) { ++ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ for(i = 0x30b8; i< 0x30c8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x344c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3808/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x381c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3848/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x384c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3850/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x3858/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x385c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3864/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x386c/4, 0xbf800000); ++} ++ ++static void nv34_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x40c/4, 0x01000101); ++ INSTANCE_WR(ctx, 0x420/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x424/4, 0x00000060); ++ INSTANCE_WR(ctx, 0x440/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x444/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x448/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x45c/4, 0x44400000); ++ INSTANCE_WR(ctx, 0x480/4, 
0xffff0000); ++ for(i = 0x4d4; i< 0x4dc; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x4e0/4, 0x00011100); ++ for(i = 0x4fc; i< 0x53c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x544/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x57c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x580/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x584/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x588/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x58c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x5a0/4, 0xb0000000); ++ for(i = 0x5f0; i< 0x630; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00010588); ++ for(i = 0x630; i< 0x670; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x6b0; i< 0x6f0; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0008aae4); ++ for(i = 0x6f0; i< 0x730; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x730; i< 0x770; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x850/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x854/4, 0x00010000); ++ for(i = 0x858; i< 0x868; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00040004); ++ for(i = 0x15ac; i<= 0x271c ; i+= 16) { ++ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ for(i = 0x274c; i< 0x275c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ae0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2e9c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2eb0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2edc/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x2ee0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2ee4/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x2eec/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x2ef0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2ef8/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000); ++} ++ ++static void nv35_36_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x40c/4, 0x00000101); ++ INSTANCE_WR(ctx, 0x420/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x424/4, 
0x00000060); ++ INSTANCE_WR(ctx, 0x440/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x444/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x448/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x45c/4, 0x44400000); ++ INSTANCE_WR(ctx, 0x488/4, 0xffff0000); ++ for(i = 0x4dc; i< 0x4e4; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x4e8/4, 0x00011100); ++ for(i = 0x504; i< 0x544; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x588/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x58c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x590/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x594/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000); ++ for(i = 0x604; i< 0x644; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00010588); ++ for(i = 0x644; i< 0x684; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x6c4; i< 0x704; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0008aae4); ++ for(i = 0x704; i< 0x744; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x744; i< 0x784; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x860/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x864/4, 0x00010000); ++ for(i = 0x868; i< 0x878; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00040004); ++ for(i = 0x1f1c; i<= 0x308c ; i+= 16) { ++ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ for(i = 0x30bc; i< 0x30cc; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x3450/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x380c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3820/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x384c/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x3850/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3854/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x385c/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x3860/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3868/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x3870/4, 0xbf800000); ++} ++ ++int nv20_graph_create_context(struct nouveau_channel *chan) ++{ ++ struct 
drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); ++ unsigned int ctx_size; ++ unsigned int idoffs = 0x28/4; ++ int ret; ++ ++ switch (dev_priv->chipset) { ++ case 0x20: ++ ctx_size = NV20_GRCTX_SIZE; ++ ctx_init = nv20_graph_context_init; ++ idoffs = 0; ++ break; ++ case 0x25: ++ case 0x28: ++ ctx_size = NV25_GRCTX_SIZE; ++ ctx_init = nv25_graph_context_init; ++ break; ++ case 0x2a: ++ ctx_size = NV2A_GRCTX_SIZE; ++ ctx_init = nv2a_graph_context_init; ++ idoffs = 0; ++ break; ++ case 0x30: ++ case 0x31: ++ ctx_size = NV30_31_GRCTX_SIZE; ++ ctx_init = nv30_31_graph_context_init; ++ break; ++ case 0x34: ++ ctx_size = NV34_GRCTX_SIZE; ++ ctx_init = nv34_graph_context_init; ++ break; ++ case 0x35: ++ case 0x36: ++ ctx_size = NV35_36_GRCTX_SIZE; ++ ctx_init = nv35_36_graph_context_init; ++ break; ++ default: ++ ctx_size = 0; ++ ctx_init = nv35_36_graph_context_init; ++ DRM_ERROR("Please contact the devs if you want your NV%x" ++ " card to work\n", dev_priv->chipset); ++ return -ENOSYS; ++ break; ++ } ++ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &chan->ramin_grctx))) ++ return ret; ++ ++ /* Initialise default context values */ ++ ctx_init(dev, chan->ramin_grctx->gpuobj); ++ ++ /* nv20: INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, idoffs, (chan->id<<24)|0x1); ++ /* CTX_USER */ ++ ++ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, ++ chan->ramin_grctx->instance >> 4); ++ ++ return 0; ++} ++ ++void nv20_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (chan->ramin_grctx) ++ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); ++ ++ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0); ++} ++ ++int nv20_graph_load_context(struct 
nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, ++ NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD); ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); ++ ++ nouveau_wait_for_idle(dev); ++ return 0; ++} ++ ++int nv20_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, ++ NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); ++ ++ nouveau_wait_for_idle(dev); ++ return 0; ++} ++ ++static void nv20_graph_rdi(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i, writecount = 32; ++ uint32_t rdi_index = 0x2c80000; ++ ++ if (dev_priv->chipset == 0x20) { ++ rdi_index = 0x3d0000; ++ writecount = 15; ++ } ++ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index); ++ for (i = 0; i < writecount; i++) ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, 0); ++ ++ nouveau_wait_for_idle(dev); ++} ++ ++int nv20_graph_init(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = ++ (struct drm_nouveau_private *)dev->dev_private; ++ uint32_t tmp, vramsz; ++ int ret, i; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ if (!dev_priv->ctx_table) { ++ /* Create Context Pointer Table */ ++ dev_priv->ctx_table_size = 32 * 4; ++ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, ++ dev_priv->ctx_table_size, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &dev_priv->ctx_table))) ++ return ret; ++ } ++ ++ 
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, ++ dev_priv->ctx_table->instance >> 4); ++ ++ nv20_graph_rdi(dev); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */ ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000); ++ NV_WRITE(0x40009C , 0x00000040); ++ ++ if (dev_priv->chipset >= 0x25) { ++ NV_WRITE(0x400890, 0x00080000); ++ NV_WRITE(0x400610, 0x304B1FB6); ++ NV_WRITE(0x400B80, 0x18B82880); ++ NV_WRITE(0x400B84, 0x44000000); ++ NV_WRITE(0x400098, 0x40000080); ++ NV_WRITE(0x400B88, 0x000000ff); ++ } else { ++ NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */ ++ NV_WRITE(0x400094, 0x00000005); ++ NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */ ++ NV_WRITE(0x400B84, 0x24000000); ++ NV_WRITE(0x400098, 0x00000040); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); ++ } ++ ++ /* copy tile info from PFB */ ++ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { ++ NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i))); ++ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i))); ++ NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i))); ++ /* which is NV40_PGRAPH_TSIZE0(i) ?? */ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i))); ++ NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i))); ++ /* which is NV40_PGRAPH_TILE0(i) ?? 
*/ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i))); ++ } ++ for (i = 0; i < 8; i++) { ++ NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4)); ++ } ++ NV_WRITE(0x4009a0, NV_READ(0x100324)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324)); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100); ++ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ ++ /* begin RAM config */ ++ vramsz = drm_get_resource_len(dev, 0) - 1; ++ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(0x400820, 0); ++ NV_WRITE(0x400824, 0); ++ NV_WRITE(0x400864, vramsz-1); ++ NV_WRITE(0x400868, vramsz-1); ++ ++ /* interesting.. the below overwrites some of the tile setup above.. 
*/ ++ NV_WRITE(0x400B20, 0x00000000); ++ NV_WRITE(0x400B04, 0xFFFFFFFF); ++ ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); ++ ++ return 0; ++} ++ ++void nv20_graph_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); ++} ++ ++int nv30_graph_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++// uint32_t vramsz, tmp; ++ int ret, i; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ if (!dev_priv->ctx_table) { ++ /* Create Context Pointer Table */ ++ dev_priv->ctx_table_size = 32 * 4; ++ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, ++ dev_priv->ctx_table_size, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &dev_priv->ctx_table))) ++ return ret; ++ } ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, ++ dev_priv->ctx_table->instance >> 4); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0); ++ NV_WRITE(0x400890, 0x01b463ff); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf2de0475); ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000); ++ NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); ++ NV_WRITE(0x400B80, 0x1003d888); ++ NV_WRITE(0x400B84, 0x0c000000); ++ NV_WRITE(0x400098, 0x00000000); ++ NV_WRITE(0x40009C, 0x0005ad00); ++ NV_WRITE(0x400B88, 0x62ff00ff); // suspiciously like PGRAPH_DEBUG_2 ++ NV_WRITE(0x4000a0, 0x00000000); ++ NV_WRITE(0x4000a4, 0x00000008); ++ NV_WRITE(0x4008a8, 0xb784a400); ++ NV_WRITE(0x400ba0, 0x002f8685); ++ NV_WRITE(0x400ba4, 0x00231f3f); ++ NV_WRITE(0x4008a4, 0x40000020); ++ ++ if 
(dev_priv->chipset == 0x34) { ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00200201); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0008); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000008); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000032); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00004); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000002); ++ } ++ ++ NV_WRITE(0x4000c0, 0x00000016); ++ ++ /* copy tile info from PFB */ ++ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { ++ NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i))); ++ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ ++ NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i))); ++ /* which is NV40_PGRAPH_TSIZE0(i) ?? */ ++ NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i))); ++ /* which is NV40_PGRAPH_TILE0(i) ?? */ ++ } ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100); ++ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(0x0040075c , 0x00000001); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ /* begin RAM config */ ++// vramsz = drm_get_resource_len(dev, 0) - 1; ++ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1)); ++ if (dev_priv->chipset != 0x34) { ++ NV_WRITE(0x400750, 0x00EA0000); ++ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x400750, 0x00EA0004); ++ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG1)); ++ } ++ ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c +new file mode 100644 +index 0000000..ae784cb +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv40_fb.c +@@ -0,0 +1,62 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv40_fb_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t fb_bar_size, tmp; ++ int num_tiles; ++ int i; ++ ++ /* This is strictly a NV4x register (don't know about NV5x). 
*/ ++ /* The blob sets these to all kinds of values, and they mess up our setup. */ ++ /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */ ++ /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */ ++ /* Any idea what this is? */ ++ NV_WRITE(NV40_PFB_UNK_800, 0x1); ++ ++ switch (dev_priv->chipset) { ++ case 0x40: ++ case 0x45: ++ tmp = NV_READ(NV10_PFB_CLOSE_PAGE2); ++ NV_WRITE(NV10_PFB_CLOSE_PAGE2, tmp & ~(1<<15)); ++ num_tiles = NV10_PFB_TILE__SIZE; ++ break; ++ case 0x46: /* G72 */ ++ case 0x47: /* G70 */ ++ case 0x49: /* G71 */ ++ case 0x4b: /* G73 */ ++ case 0x4c: /* C51 (G7X version) */ ++ num_tiles = NV40_PFB_TILE__SIZE_1; ++ break; ++ default: ++ num_tiles = NV40_PFB_TILE__SIZE_0; ++ break; ++ } ++ ++ fb_bar_size = drm_get_resource_len(dev, 0) - 1; ++ switch (dev_priv->chipset) { ++ case 0x40: ++ for (i=0; iramfc->gpuobj, \ ++ NV40_RAMFC_##offset/4, (val)) ++#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ ++ NV40_RAMFC_##offset/4) ++#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE)) ++#define NV40_RAMFC__SIZE 128 ++ ++int ++nv40_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, ++ NV40_RAMFC__SIZE, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ NULL, &chan->ramfc))) ++ return ret; ++ ++ /* Fill entries that are seen filled in dumps of nvidia driver just ++ * after channel's is put into DMA mode ++ */ ++ RAMFC_WR(DMA_PUT , chan->pushbuf_base); ++ RAMFC_WR(DMA_GET , chan->pushbuf_base); ++ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4); ++ RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 
0x30000000 /* no idea.. */); ++ RAMFC_WR(DMA_SUBROUTINE, 0); ++ RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4); ++ RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); ++ ++ /* enable the fifo dma operation */ ++ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<id)); ++ return 0; ++} ++ ++void ++nv40_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<id)); ++ ++ if (chan->ramfc) ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv40_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp, tmp2; ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , RAMFC_RD(DMA_INSTANCE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , RAMFC_RD(DMA_DCOUNT)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); ++ ++ /* No idea what 0x2058 is.. 
*/ ++ tmp = RAMFC_RD(DMA_FETCH); ++ tmp2 = NV_READ(0x2058) & 0xFFF; ++ tmp2 |= (tmp & 0x30000000); ++ NV_WRITE(0x2058, tmp2); ++ tmp &= ~0x30000000; ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , tmp); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE , RAMFC_RD(ACQUIRE_VALUE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT , RAMFC_RD(ACQUIRE_TIMEOUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE , RAMFC_RD(SEMAPHORE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE , RAMFC_RD(DMA_SUBROUTINE)); ++ NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE , RAMFC_RD(GRCTX_INSTANCE)); ++ NV_WRITE(0x32e4, RAMFC_RD(UNK_40)); ++ /* NVIDIA does this next line twice... */ ++ NV_WRITE(0x32e8, RAMFC_RD(UNK_44)); ++ NV_WRITE(0x2088, RAMFC_RD(UNK_4C)); ++ NV_WRITE(0x3300, RAMFC_RD(UNK_50)); ++ ++ /* not sure what part is PUT, and which is GET.. never seen a non-zero ++ * value appear in a mmio-trace yet.. 
++ */ ++ ++ /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */ ++ tmp = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF; ++ tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF; ++ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp); ++ ++ /* Set channel active, and in DMA mode */ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, ++ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); ++ ++ /* Reset DMA_CTL_AT_INFO to INVALID */ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); ++ ++ return 0; ++} ++ ++int ++nv40_fifo_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); ++ RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); ++ RAMFC_WR(DMA_DCOUNT , NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT)); ++ RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); ++ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH); ++ tmp |= NV_READ(0x2058) & 0x30000000; ++ RAMFC_WR(DMA_FETCH , tmp); ++ ++ RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); ++ RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); ++ RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); ++ tmp = NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP); ++ RAMFC_WR(ACQUIRE_TIMESTAMP, tmp); ++ RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); ++ RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); ++ ++ /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something ++ * more involved depending on the value of 0x3228? 
++ */ ++ RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ ++ RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE)); ++ ++ /* No idea what the below is for exactly, ripped from a mmio-trace */ ++ RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4)); ++ ++ /* NVIDIA do this next line twice.. bug? */ ++ RAMFC_WR(UNK_44 , NV_READ(0x32e8)); ++ RAMFC_WR(UNK_4C , NV_READ(0x2088)); ++ RAMFC_WR(UNK_50 , NV_READ(0x3300)); ++ ++ ++ return 0; ++} ++ ++int ++nv40_fifo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_fifo_init(dev))) ++ return ret; ++ ++ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff); ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c +new file mode 100644 +index 0000000..de178f5 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv40_graph.c +@@ -0,0 +1,2193 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++/*TODO: deciper what each offset in the context represents. The below ++ * contexts are taken from dumps just after the 3D object is ++ * created. ++ */ ++static void ++nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ /* Always has the "instance address" of itself at offset 0 */ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ /* unknown */ ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00180/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00184/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00188/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0018c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0019c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001c0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000010); ++ INSTANCE_WR(ctx, 
0x00480/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00498/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x004b4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004b8/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004d0/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x004ec/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x004fc/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00504/4, 0x00011100); ++ for (i=0x00520; i<=0x0055c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00568/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x00594/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x0059c/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x005a0/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x005b4/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0060c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00610/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00614/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00618/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00628/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00630/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00640/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0067c/4, 0x00ffff00); ++ /* 0x680-0x6BC - NV30_TCL_PRIMITIVE_3D_TX_ADDRESS_UNIT(0-15) */ ++ /* 0x6C0-0x6FC - NV30_TCL_PRIMITIVE_3D_TX_FORMAT_UNIT(0-15) */ ++ for (i=0x006C0; i<=0x006fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ /* 0x700-0x73C - NV30_TCL_PRIMITIVE_3D_TX_WRAP_UNIT(0-15) */ ++ for (i=0x00700; i<=0x0073c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ /* 0x740-0x77C - NV30_TCL_PRIMITIVE_3D_TX_ENABLE_UNIT(0-15) */ ++ /* 0x780-0x7BC - NV30_TCL_PRIMITIVE_3D_TX_SWIZZLE_UNIT(0-15) */ ++ for (i=0x00780; i<=0x007bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ /* 0x7C0-0x7FC - NV30_TCL_PRIMITIVE_3D_TX_FILTER_UNIT(0-15) */ ++ for (i=0x007c0; i<=0x007fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ /* 
0x800-0x83C - NV30_TCL_PRIMITIVE_3D_TX_XY_DIM_UNIT(0-15) */ ++ for (i=0x00800; i<=0x0083c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ /* 0x840-0x87C - NV30_TCL_PRIMITIVE_3D_TX_UNK07_UNIT(0-15) */ ++ /* 0x880-0x8BC - NV30_TCL_PRIMITIVE_3D_TX_DEPTH_UNIT(0-15) */ ++ for (i=0x00880; i<=0x008bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ /* unknown */ ++ for (i=0x00910; i<=0x0091c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00920; i<=0x0092c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00940; i<=0x0094c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00960; i<=0x0096c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00980/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x009b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x009c4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x009c8/4, 0x60103f00); ++ INSTANCE_WR(ctx, 0x009d4/4, 0x00020000); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00aac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af8/4, 0x80800001); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00bf8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c00/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c04/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c08/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c44/4, 0x00000001); ++ for (i=0x03008; i<=0x03080; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x05288; i<=0x08570; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x08628; i<=0x08e18; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0bd28; i<=0x0f010; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0f0c8; i<=0x0f8b8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x127c8; i<=0x15ab0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x15b68; i<=0x16358; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x19268; 
i<=0x1c550; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x1c608; i<=0x1cdf8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x1fd08; i<=0x22ff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x230a8; i<=0x23898; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x267a8; i<=0x29a90; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x29b48; i<=0x2a338; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv41_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00000128/4, 0x02008821); ++ for (i = 0x00000178; i <= 0x00000180; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00000188/4, 0x00000040); ++ for (i = 0x00000194; i <= 0x000001b0; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00000340/4, 0x00040000); ++ for (i = 0x00000350; i <= 0x0000035c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x000003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x000003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x000003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x000003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x000003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00000418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00000424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00000428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00000430/4, 0x00011100); ++ for (i = 0x0000044c; i <= 0x00000488; i += 4) ++ INSTANCE_WR(ctx, i/4, 
0x07ff0000); ++ INSTANCE_WR(ctx, 0x00000494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x000004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x000004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x000004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x000004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x000004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x000004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0000052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00000530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00000534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00000538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00000548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0000054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00000550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000560/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00000598/4, 0x00ffff00); ++ for (i = 0x000005dc; i <= 0x00000618; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i = 0x0000061c; i <= 0x00000658; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i = 0x0000069c; i <= 0x000006d8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i = 0x000006dc; i <= 0x00000718; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i = 0x0000071c; i <= 0x00000758; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i = 0x0000079c; i <= 0x000007d8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i = 0x0000082c; i <= 0x00000838; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i = 0x0000083c; i <= 0x00000848; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i = 0x0000085c; i <= 0x00000868; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i = 0x0000087c; i <= 0x00000888; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x0000089c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x000008d0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x000008d4/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x000008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x000008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x000008e8/4, 0x20103f00); ++ INSTANCE_WR(ctx, 0x000008f4/4, 0x00020000); ++ INSTANCE_WR(ctx, 0x0000092c/4, 
0x00008100); ++ INSTANCE_WR(ctx, 0x000009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x000009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00000a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00000a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00000aac/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00000ab8/4, 0x0000ffff); ++ for (i = 0x00000ad4; i <= 0x00000ae4; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00000ae8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000b20/4, 0x00000001); ++ for (i = 0x00002ee8; i <= 0x00002f60; i += 8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x00005168; i <= 0x00007358; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x00007368; i <= 0x00007758; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x0000a068; i <= 0x0000c258; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x0000c268; i <= 0x0000c658; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x0000ef68; i <= 0x00011158; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x00011168; i <= 0x00011558; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x00013e68; i <= 0x00016058; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x00016068; i <= 0x00016458; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++}; ++ ++static void ++nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00194/4, 0x80000000); ++ 
INSTANCE_WR(ctx, 0x00198/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0019c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001a4/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001a8/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001ac/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00011100); ++ for (i=0x0044c; i<=0x00488; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00560/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00ffff00); ++ for (i=0x005dc; i<=0x00618; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x0061c; i<=0x00658; i+=4) ++ 
INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x0069c; i<=0x006d8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006dc; i<=0x00718; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x0071c; i<=0x00758; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x0079c; i<=0x007d8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x0082c; i<=0x00838; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x0083c; i<=0x00848; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x0085c; i<=0x00868; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x0087c; i<=0x00888; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x0089c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x00020000); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); ++ for (i=0x02ec0; i<=0x02f38; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x04c80; i<=0x06e70; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x06e80; i<=0x07270; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x096c0; i<=0x0b8b0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0b8c0; i<=0x0bcb0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0e100; i<=0x102f0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x10300; i<=0x106f0; i+=16) ++ INSTANCE_WR(ctx, i/4, 
0x3f800000); ++}; ++ ++static void ++nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0004c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00138/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x0013c/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00144/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00184/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0018c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00190/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00194/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00198/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0019c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001a4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001ec/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x0036c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00370/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00374/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00378/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003a4/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x003b8/4, 0x00003010); ++ INSTANCE_WR(ctx, 0x003dc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003e4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003e8/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00400/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00404/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00408/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0040c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00410/4, 
0x00000111); ++ INSTANCE_WR(ctx, 0x00414/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00418/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004b0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004b4/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x004d0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004d4/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x004d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004ec/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x004fc/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00500/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00504/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00508/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0050c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00510/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00514/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00518/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00520/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00524/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00528/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00530/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00534/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00538/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0053c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00554/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00011100); ++ for (i=0x00578; i<0x005b4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c0/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x005ec/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x005f0/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x005f4/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x00608/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x00624/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00658/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x0065c/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00660/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00664/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00674/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00678/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x0067c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0068c/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 
0x006c8/4, 0x00ffff00); ++ for (i=0x0070c; i<=0x00748; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x0074c; i<=0x00788; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x007cc; i<=0x00808; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x0080c; i<=0x00848; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x0084c; i<=0x00888; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x008cc; i<=0x00908; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x0095c; i<=0x00968; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x0096c; i<=0x00978; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x0098c; i<=0x00998; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x009ac; i<=0x009b8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x009cc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00a00/4, 0x00000421); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x00a14/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x00a18/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x00a1c/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x00a28/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00a60/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00aec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b30/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00b38/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00bc0/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bec/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bf4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00c2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00c30/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00c34/4, 0x000e3000); ++ for (i=0x017f8; i<=0x01870; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x035b8; i<=0x057a8; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x057b8; i<=0x05ba8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x07f38; i<=0x0a128; 
i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0a138; i<=0x0a528; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0c8b8; i<=0x0eaa8; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0eab8; i<=0x0eea8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++/* This may only work on 7800 AGP cards, will include a warning */ ++static void ++nv47_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00000128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00000178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0000017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00000180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00000188/4, 0x00000040); ++ for (i=0x00000194; i<=0x000001b0; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00000340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00000350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0000035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010); ++ for (i=0x000003c0; i<=0x000003fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00000454/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00000458/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x00000474/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00000478/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x0000047c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000490/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x000004a0/4, 0xffff0000); ++ for (i=0x000004a4; i<=0x000004e0; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x88888888); ++ 
INSTANCE_WR(ctx, 0x000004f4/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x000004f8/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00000500/4, 0x00011100); ++ for (i=0x0000051c; i<=0x00000558; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00000564/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x0000058c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00000590/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x00000594/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x00000598/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x000005ac/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x000005c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000005fc/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00000600/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00000604/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00000608/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00000618/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0000061c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00000620/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000630/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0000066c/4, 0x00ffff00); ++ for (i=0x000006b0; i<=0x000006ec; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x000006f0; i<=0x0000072c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00000770; i<=0x000007ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x000007b0; i<=0x000007ec; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x000007f0; i<=0x0000082c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00000870; i<=0x000008ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ INSTANCE_WR(ctx, 0x00000900/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x00000904/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x00000908/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x0000090c/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x00000910/4, 0x00000202); ++ INSTANCE_WR(ctx, 0x00000914/4, 0x00000202); ++ INSTANCE_WR(ctx, 0x00000918/4, 0x00000202); ++ INSTANCE_WR(ctx, 0x0000091c/4, 0x00000202); ++ for (i=0x00000930; i<=0x0000095c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00000970/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x000009a4/4, 
0x00000021); ++ INSTANCE_WR(ctx, 0x000009a8/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x000009b4/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x000009b8/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x000009bc/4, 0x40103f00); ++ INSTANCE_WR(ctx, 0x000009c8/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00000a00/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00000a8c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000ad0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00000adc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00000ae0/4, 0x00888001); ++ for (i=0x00000b10; i<=0x00000b8c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00000bb4/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00000bc0/4, 0x0000ffff); ++ for (i=0x00000bdc; i<=0x00000bf8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00000bfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000c34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000c38/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00000c3c/4, 0x000e3000); ++ for (i=0x00003000; i<=0x00003078; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00004dc0; i<=0x00006fb0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x00006fc0; i<=0x000073b0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00009800; i<=0x0000b9f0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0000ba00; i<=0x00010430; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00010440; i<=0x00010830; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00012c80; i<=0x00014e70; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x00014e80; i<=0x00015270; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x000176c0; i<=0x000198b0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x000198c0; i<=0x00019cb0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0001c100; i<=0x0001e2f0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0001e300; i<=0x0001e6f0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv49_graph_context_init(struct drm_device *dev, struct 
nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00014/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00218/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00220/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00228/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00234/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00238/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00240/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00244/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00248/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00250/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00428/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00464/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00468/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00474/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0047c/4, 
0x00000111); ++ INSTANCE_WR(ctx, 0x00480/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00484/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00488/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00490/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00498/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x00514/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00530/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00544/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00548/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00550/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00554/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00558/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00560/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00564/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00568/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00570/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00574/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00578/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00580/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); ++ 
INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00630/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x006a8/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); ++ for (i=0x00750; i<=0x0078c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00790; i<=0x007cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00810; i<=0x0084c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x00850; i<=0x0088c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00890; i<=0x008cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00910; i<=0x0094c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x009a0; i<=0x009ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x009b0; i<=0x009bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x009d0; i<=0x009dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x009f0; i<=0x009fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); ++ INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); ++ INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); ++ INSTANCE_WR(ctx, 
0x00aa0/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c54/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c88/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c94/4, 
0x00005555); ++ INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); ++ for(i=0x030a0; i<=0x03118; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x098a0; i<=0x0ba90; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x0baa0; i<=0x0be90; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x0e2e0; i<=0x0fff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x10008; i<=0x104d0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x104e0; i<=0x108d0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x12d20; i<=0x14f10; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x14f20; i<=0x15310; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x17760; i<=0x19950; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x19960; i<=0x19d50; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x1c1a0; i<=0x1e390; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x1e3a0; i<=0x1e790; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x20be0; i<=0x22dd0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x22de0; i<=0x231d0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00158/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00168/4, 0x00000001); ++ 
INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00003010); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00011100); ++ for (i=0x0044c; i<=0x00488; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); ++ for (i=0x005d8; i<=0x00614; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00618; i<=0x00654; i+=4) ++ 
INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00698; i<=0x006d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006d8; i<=0x00714; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00718; i<=0x00754; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00798; i<=0x007d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x00828; i<=0x00834; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00838; i<=0x00844; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00858; i<=0x00864; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00878; i<=0x00884; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00898/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008cc/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); ++ for (i=0x016c0; i<=0x01738; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x03840; i<=0x05670; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x05680; i<=0x05a70; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x07e00; i<=0x09ff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0a000; i<=0x0a3f0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0c780; i<=0x0e970; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0e980; 
i<=0x0ed70; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4b_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00014/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00218/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00220/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00228/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00234/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00238/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00240/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00244/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00248/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00250/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00428/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00464/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00468/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00470/4, 
0x00000111); ++ INSTANCE_WR(ctx, 0x00474/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0047c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00480/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00484/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00488/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00490/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00498/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x00514/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00530/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00544/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00548/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00550/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00554/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00558/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00560/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00564/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00568/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00570/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00574/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00578/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00580/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); ++ 
INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00630/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x006a8/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); ++ for (i=0x00750; i<=0x0078c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00790; i<=0x007cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00810; i<=0x0084c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x00850; i<=0x0088c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00890; i<=0x008cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00910; i<=0x0094c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x009a0; i<=0x009ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x009b0; i<=0x009bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x009d0; i<=0x009dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x009f0; i<=0x009fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); ++ INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); ++ INSTANCE_WR(ctx, 
0x00a58/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); ++ INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c54/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c88/4, 
0x00005555); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c94/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); ++ for(i=0x030a0; i<=0x03118; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x098a0; i<=0x0ba90; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x0baa0; i<=0x0be90; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x0e2e0; i<=0x0fff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x10008; i<=0x104d0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x104e0; i<=0x108d0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x12d20; i<=0x14f10; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x14f20; i<=0x15310; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x17760; i<=0x19950; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x19960; i<=0x19d50; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4c_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00158/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00168/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 
0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f4/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0040c/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x0041c/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x0042c/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00434/4, 0x00011100); ++ for (i=0x00450; i<0x0048c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00498/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c4/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004cc/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004fc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00530/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00534/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00538/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x0053c/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x0054c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00550/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00554/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00564/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0059c/4, 0x00ffff00); ++ for (i=0x005e0; i<=0x0061c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00620; i<=0x0065c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x006a0; i<=0x006dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006e0; i<=0x0071c; i+=4) ++ INSTANCE_WR(ctx, 
i/4, 0x01012000); ++ for (i=0x00720; i<=0x0075c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x007a0; i<=0x007dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x00830; i<=0x0083c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00840; i<=0x0084c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00860; i<=0x0086c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00880; i<=0x0088c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x008a0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x008d8/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008dc/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008ec/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008f0/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008fc/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00934/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a0c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a74/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a80/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00a9c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00ad8/4, 0x00000001); ++ for (i=0x016a0; i<0x01718; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x03460; i<0x05650; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x05660; i<0x05a50; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 
0x00158/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00168/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00011100); ++ for (i=0x0044c; i<=0x00488; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); ++ 
INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); ++ for (i=0x005d8; i<=0x00614; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00618; i<=0x00654; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00698; i<=0x006d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006d8; i<=0x00714; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00718; i<=0x00754; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00798; i<=0x007d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x00828; i<=0x00834; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00838; i<=0x00844; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00858; i<=0x00864; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00878; i<=0x00884; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00898/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008cc/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a6c/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a78/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00a94/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00aa4/4, 0x00000001); ++ for (i=0x01668; i<=0x016e0; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x03428; i<=0x05618; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x05628; i<=0x05a18; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++int ++nv40_graph_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private 
*dev_priv = dev->dev_private; ++ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); ++ int ret; ++ ++ /* These functions populate the graphics context with a whole heap ++ * of default state. All these functions are very similar, with ++ * a minimal amount of chipset-specific changes. However, as we're ++ * currently dependant on the context programs used by the NVIDIA ++ * binary driver these functions must match the layout expected by ++ * them. Hopefully at some point this will all change. ++ */ ++ switch (dev_priv->chipset) { ++ case 0x40: ++ ctx_init = nv40_graph_context_init; ++ break; ++ case 0x41: ++ case 0x42: ++ ctx_init = nv41_graph_context_init; ++ break; ++ case 0x43: ++ ctx_init = nv43_graph_context_init; ++ break; ++ case 0x46: ++ ctx_init = nv46_graph_context_init; ++ break; ++ case 0x47: ++ ctx_init = nv47_graph_context_init; ++ break; ++ case 0x49: ++ ctx_init = nv49_graph_context_init; ++ break; ++ case 0x44: ++ case 0x4a: ++ ctx_init = nv4a_graph_context_init; ++ break; ++ case 0x4b: ++ ctx_init = nv4b_graph_context_init; ++ break; ++ case 0x4c: ++ case 0x67: ++ ctx_init = nv4c_graph_context_init; ++ break; ++ case 0x4e: ++ ctx_init = nv4e_graph_context_init; ++ break; ++ default: ++ ctx_init = nv40_graph_context_init; ++ break; ++ } ++ ++ /* Allocate a 175KiB block of PRAMIN to store the context. This ++ * is massive overkill for a lot of chipsets, but it should be safe ++ * until we're able to implement this properly (will happen at more ++ * or less the same time we're able to write our own context programs. 
++ */ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &chan->ramin_grctx))) ++ return ret; ++ ++ /* Initialise default context values */ ++ ctx_init(dev, chan->ramin_grctx->gpuobj); ++ ++ return 0; ++} ++ ++void ++nv40_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx); ++} ++ ++static int ++nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t old_cp, tv = 1000, tmp; ++ int i; ++ ++ old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ ++ tmp = NV_READ(NV40_PGRAPH_CTXCTL_0310); ++ tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : ++ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD; ++ NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp); ++ ++ tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304); ++ tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX; ++ NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp); ++ ++ nouveau_wait_for_idle(dev); ++ ++ for (i = 0; i < tv; i++) { ++ if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) ++ break; ++ } ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); ++ ++ if (i == tv) { ++ uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT); ++ DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save); ++ DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n", ++ ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT, ++ ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK); ++ DRM_ERROR("0x40030C = 0x%08x\n", ++ NV_READ(NV40_PGRAPH_CTXCTL_030C)); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++/* Save current context (from PGRAPH) into the channel's context */ ++int ++nv40_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ uint32_t inst; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ return nv40_graph_transfer_context(dev, inst, 1); ++} ++ ++/* Restore the context for a 
specific channel into PGRAPH */ ++int ++nv40_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ int ret; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ ret = nv40_graph_transfer_context(dev, inst, 0); ++ if (ret) ++ return ret; ++ ++ /* 0x40032C, no idea of it's exact function. Could simply be a ++ * record of the currently active PGRAPH context. It's currently ++ * unknown as to what bit 24 does. The nv ddx has it set, so we will ++ * set it here too. ++ */ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, ++ (inst & NV40_PGRAPH_CTXCTL_CUR_INST_MASK) | ++ NV40_PGRAPH_CTXCTL_CUR_LOADED); ++ /* 0x32E0 records the instance address of the active FIFO's PGRAPH ++ * context. If at any time this doesn't match 0x40032C, you will ++ * recieve PGRAPH_INTR_CONTEXT_SWITCH ++ */ ++ NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst); ++ return 0; ++} ++ ++/* These blocks of "magic numbers" are actually a microcode that the GPU uses ++ * to control how graphics contexts get saved and restored between PRAMIN ++ * and PGRAPH during a context switch. We're currently using values seen ++ * in mmio-traces of the binary driver. 
++ */ ++static uint32_t nv40_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406, ++ 0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00110205, 0x0011420a, 0x00114210, 0x00110216, ++ 0x0012421b, 0x00120270, 0x001242c0, 0x00200040, 0x00100280, 0x00128100, ++ 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, ++ 0x00110400, 0x00104d10, 0x00500060, 0x00403b87, 0x0060000d, 0x004076e6, ++ 0x002000f0, 0x0060000a, 0x00200045, 0x00100620, 0x00108668, 0x0011466b, ++ 0x00120682, 0x0011068b, 0x00168691, 0x0010c6ae, 0x001206b4, 0x0020002a, ++ 0x001006c4, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1, ++ 0x00500060, 0x00405600, 0x00405684, 0x00600003, 0x00500067, 0x00600008, ++ 0x00500060, 0x00700082, 0x0020026c, 0x0060000a, 0x00104800, 0x00104901, ++ 0x00120920, 0x00200035, 0x00100940, 0x00148a00, 0x00104a14, 0x00200038, ++ 0x00100b00, 0x00138d00, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, ++ 0x0020031a, 0x0060000a, 0x00300000, 0x00200680, 0x00406c00, 0x00200684, ++ 0x00800001, 0x00200b62, 0x0060000a, 0x0020a0b0, 0x0040728a, 0x00201b68, ++ 0x00800041, 0x00407684, 0x00203e60, 0x00800002, 0x00408700, 0x00600006, ++ 0x00700003, 0x004080e6, 0x00700080, 0x0020031a, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a284, ++ 0x00700002, 0x00600004, 0x0040a268, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409388, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, ++ 
0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, ++ 0x0040a406, 0x0040a505, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ++ ~0 ++}; ++ ++static uint32_t nv41_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, ++ 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00404087, 0x0060000d, 0x004079e6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200233, 0x0060000a, 0x00104800, ++ 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, ++ 0x00108a14, 0x00200020, 0x00100b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, ++ 0x00114d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, ++ 0x002002d2, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684, ++ 0x00800001, 0x00200b1a, 0x0060000a, 0x00206380, 0x0040788a, 0x00201480, ++ 0x00800041, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x0020007a, ++ 0x0060000a, 0x00104280, 0x002002d2, 0x0060000a, 0x00200004, 0x00800001, ++ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 
++ 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a, ++ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x00940400, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv43_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, ++ 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, ++ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200233, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, ++ 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, ++ 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, ++ 0x002002c8, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684, ++ 0x00800001, 0x00200b10, 0x0060000a, 0x00203870, 0x0040788a, 0x00201350, ++ 0x00800041, 0x00407c84, 0x00201560, 0x00800002, 0x00408d00, 0x00600006, ++ 0x00700003, 0x004086e6, 0x00700080, 0x002002c8, 0x0060000a, 0x00200004, ++ 0x00800001, 
0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, ++ 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, ++ 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, ++ 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ++ ~0 ++}; ++ ++static uint32_t nv44_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06, ++ 0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, ++ 0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b, ++ 0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d, ++ 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6, ++ 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158, ++ 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, ++ 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, ++ 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f, ++ 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec, ++ 0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a, ++ 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691, ++ 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc, ++ 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, ++ 0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901, ++ 0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, ++ 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, ++ 
0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8, ++ 0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001, ++ 0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 0x00800029, ++ 0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a, ++ 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000, ++ 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007, ++ 0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, ++ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv46_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, ++ 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200008, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x00500060, 0x00403f87, 0x0060000d, 0x004079e6, 0x002000f7, 0x0060000a, ++ 0x00200045, 0x00100620, 0x00104668, 0x0017466d, 0x0011068b, 0x00168691, ++ 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x00200022, ++ 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1, ++ 0x00500060, 0x0020027f, 0x0060000a, 0x00104800, 0x00108901, 0x00104910, ++ 0x00124920, 0x0020001f, 
0x00100940, 0x00140965, 0x00148a00, 0x00108a14, ++ 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, 0x00104d80, ++ 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x00105406, 0x00105709, ++ 0x00200316, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084, ++ 0x00800001, 0x0020055e, 0x0060000a, 0x002037e0, 0x0040788a, 0x00201320, ++ 0x00800029, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x00200081, ++ 0x0060000a, 0x00104280, 0x00200316, 0x0060000a, 0x00200004, 0x00800001, ++ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a, ++ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv47_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606, ++ 0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d12, ++ 0x00500060, 0x00403f87, 0x0060000d, 0x00407ce6, 0x002000f0, 0x0060000a, ++ 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 
0x00100700, 0x0010c3d7, ++ 0x001043e1, 0x00500060, 0x00200268, 0x0060000a, 0x00104800, 0x00108901, ++ 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, 0x00104a19, ++ 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, 0x0010cd00, ++ 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, ++ 0x00104f06, 0x00105406, 0x00105709, 0x00200318, 0x0060000a, 0x00300000, ++ 0x00200680, 0x00407500, 0x00200684, 0x00800001, 0x00200b60, 0x0060000a, ++ 0x00209540, 0x00407b8a, 0x00201350, 0x00800041, 0x00408c00, 0x00600006, ++ 0x004088e6, 0x00700080, 0x0020007a, 0x0060000a, 0x00104280, 0x00200318, ++ 0x0060000a, 0x00200004, 0x00800001, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x0040a368, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, ++ 0x00700080, 0x00400a68, 0x00500060, 0x00600007, 0x00409688, 0x0060000f, ++ 0x00500060, 0x00200000, 0x0060000a, 0x00700000, 0x00106001, 0x0091a880, ++ 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, ++ 0x00402168, 0x0040a506, 0x0040a605, 0x00600009, 0x00700005, 0x00700006, ++ 0x0060000e, ~0 ++}; ++ ++//this is used for nv49 and nv4b ++static uint32_t nv49_4b_ctx_prog[] ={ ++ 0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020, ++ 0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000, ++ 0x001040c5, 0x00400f26, 0x00401068, 0x0060000d, 0x0070008f, 0x0070000e, ++ 0x00408d68, 0x004015e6, 0x007000a0, 0x00700080, 0x0040180f, 0x00700000, ++ 0x00200029, 0x0060000a, 0x0011814d, 0x00110158, 0x00105401, 0x0020003a, ++ 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, 0x0010c1dc, 0x00150210, ++ 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, 0x00200040, 0x00100280, ++ 0x00128100, 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, ++ 0x00104029, 0x00110400, 0x00104d12, 0x00500060, 0x004071e6, 0x00200118, ++ 0x0060000a, 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, ++ 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 
++ 0x001146c6, 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200290, 0x0060000a, 0x00104800, ++ 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, ++ 0x00104a19, 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, ++ 0x0010cd00, 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, ++ 0x00105c00, 0x00104f06, 0x00105406, 0x00105709, 0x00200340, 0x0060000a, ++ 0x00300000, 0x00200680, 0x00406a0f, 0x00200684, 0x00800001, 0x00200b88, ++ 0x0060000a, 0x00209540, 0x0040708a, 0x00201350, 0x00800041, 0x00407c0f, ++ 0x00600006, 0x00407ce6, 0x00700080, 0x002000a2, 0x0060000a, 0x00104280, ++ 0x00200340, 0x0060000a, 0x00200004, 0x00800001, 0x0070008e, 0x00408d68, ++ 0x0040020f, 0x00600006, 0x00409e68, 0x00600007, 0x0070000f, 0x0070000e, ++ 0x00408d68, 0x0091a880, 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, ++ 0x00500069, 0x0060000c, 0x00401568, 0x00700000, 0x00200001, 0x0040910e, ++ 0x00200021, 0x0060000a, 0x00409b0d, 0x00104a40, 0x00104a50, 0x00104a60, ++ 0x00104a70, 0x00104a80, 0x00104a90, 0x00104aa0, 0x00104ab0, 0x00407e0e, ++ 0x0040130f, 0x00408568, 0x0040a006, 0x0040a105, 0x00600009, 0x00700005, ++ 0x00700006, 0x0060000e, ~0 ++}; ++ ++ ++static uint32_t nv4a_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06, ++ 0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00403a87, 
0x0060000d, 0x00407de6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, ++ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, ++ 0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, ++ 0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, ++ 0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, ++ 0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a, ++ 0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100, ++ 0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 0x0020007a, 0x0060000a, ++ 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004, ++ 0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, ++ 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88, ++ 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000, ++ 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv4c_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406, ++ 0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 
0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6, ++ 0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, ++ 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, ++ 0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, ++ 0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, ++ 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, ++ 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, ++ 0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, ++ 0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a, ++ 0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080, ++ 0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168, ++ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, ++ 0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, ++ 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306, ++ 0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv4e_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, ++ 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, 
++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, ++ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, ++ 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, ++ 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x00105c00, 0x00104f06, ++ 0x002002b2, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084, ++ 0x00800001, 0x002004fa, 0x0060000a, 0x00201320, 0x0040788a, 0xfffffb06, ++ 0x00800029, 0x00407c84, 0x00200b20, 0x00800002, 0x00408d00, 0x00600006, ++ 0x00700003, 0x004086e6, 0x00700080, 0x002002b2, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, ++ 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, ++ 0x01940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, ++ 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ++ ~0 ++}; ++ ++/* ++ * G70 0x47 ++ * G71 0x49 ++ * NV45 0x48 ++ * G72[M] 0x46 ++ * G73 0x4b ++ * C51_G7X 0x4c ++ * C51 0x4e 
++ */ ++int ++nv40_graph_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = ++ (struct drm_nouveau_private *)dev->dev_private; ++ uint32_t *ctx_prog; ++ uint32_t vramsz, tmp; ++ int i, j; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ switch (dev_priv->chipset) { ++ case 0x40: ctx_prog = nv40_ctx_prog; break; ++ case 0x41: ++ case 0x42: ctx_prog = nv41_ctx_prog; break; ++ case 0x43: ctx_prog = nv43_ctx_prog; break; ++ case 0x44: ctx_prog = nv44_ctx_prog; break; ++ case 0x46: ctx_prog = nv46_ctx_prog; break; ++ case 0x47: ctx_prog = nv47_ctx_prog; break; ++ case 0x49: ctx_prog = nv49_4b_ctx_prog; break; ++ case 0x4a: ctx_prog = nv4a_ctx_prog; break; ++ case 0x4b: ctx_prog = nv49_4b_ctx_prog; break; ++ case 0x4c: ++ case 0x67: ctx_prog = nv4c_ctx_prog; break; ++ case 0x4e: ctx_prog = nv4e_ctx_prog; break; ++ default: ++ DRM_ERROR("Context program for 0x%02x unavailable\n", ++ dev_priv->chipset); ++ ctx_prog = NULL; ++ break; ++ } ++ ++ /* Load the context program onto the card */ ++ if (ctx_prog) { ++ DRM_DEBUG("Loading context program\n"); ++ i = 0; ++ ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); ++ while (ctx_prog[i] != ~0) { ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_prog[i]); ++ i++; ++ } ++ } ++ ++ /* No context present currently */ ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xe0de8055); ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000); ++ NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); ++ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ j = 
NV_READ(0x1540) & 0xff; ++ if (j) { ++ for (i=0; !(j&1); j>>=1, i++); ++ NV_WRITE(0x405000, i); ++ } ++ ++ if (dev_priv->chipset == 0x40) { ++ NV_WRITE(0x4009b0, 0x83280fff); ++ NV_WRITE(0x4009b4, 0x000000a0); ++ } else { ++ NV_WRITE(0x400820, 0x83280eff); ++ NV_WRITE(0x400824, 0x000000a0); ++ } ++ ++ switch (dev_priv->chipset) { ++ case 0x40: ++ case 0x45: ++ NV_WRITE(0x4009b8, 0x0078e366); ++ NV_WRITE(0x4009bc, 0x0000014c); ++ break; ++ case 0x41: ++ case 0x42: /* pciid also 0x00Cx */ ++// case 0x0120: //XXX (pciid) ++ NV_WRITE(0x400828, 0x007596ff); ++ NV_WRITE(0x40082c, 0x00000108); ++ break; ++ case 0x43: ++ NV_WRITE(0x400828, 0x0072cb77); ++ NV_WRITE(0x40082c, 0x00000108); ++ break; ++ case 0x44: ++ case 0x46: /* G72 */ ++ case 0x4a: ++ case 0x4c: /* G7x-based C51 */ ++ case 0x4e: ++ NV_WRITE(0x400860, 0); ++ NV_WRITE(0x400864, 0); ++ break; ++ case 0x47: /* G70 */ ++ case 0x49: /* G71 */ ++ case 0x4b: /* G73 */ ++ NV_WRITE(0x400828, 0x07830610); ++ NV_WRITE(0x40082c, 0x0000016A); ++ break; ++ default: ++ break; ++ } ++ ++ NV_WRITE(0x400b38, 0x2ffff800); ++ NV_WRITE(0x400b3c, 0x00006000); ++ ++ /* copy tile info from PFB */ ++ switch (dev_priv->chipset) { ++ case 0x40: /* vanilla NV40 */ ++ for (i=0; ichipset) { ++ case 0x40: ++ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(0x4069A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4069A8, NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(0x400820, 0); ++ NV_WRITE(0x400824, 0); ++ NV_WRITE(0x400864, vramsz); ++ NV_WRITE(0x400868, vramsz); ++ break; ++ default: ++ switch (dev_priv->chipset) { ++ case 0x46: ++ case 0x47: ++ case 0x49: ++ case 0x4b: ++ NV_WRITE(0x400DF0, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x400DF4, NV_READ(NV04_PFB_CFG1)); ++ break; ++ default: ++ NV_WRITE(0x4009F0, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009F4, NV_READ(NV04_PFB_CFG1)); ++ break; ++ } ++ NV_WRITE(0x4069F0, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4069F4, NV_READ(NV04_PFB_CFG1)); ++ 
NV_WRITE(0x400840, 0); ++ NV_WRITE(0x400844, 0); ++ NV_WRITE(0x4008A0, vramsz); ++ NV_WRITE(0x4008A4, vramsz); ++ break; ++ } ++ ++ /* per-context state, doesn't belong here */ ++ NV_WRITE(0x400B20, 0x00000000); ++ NV_WRITE(0x400B04, 0xFFFFFFFF); ++ ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); ++ ++ return 0; ++} ++ ++void nv40_graph_takedown(struct drm_device *dev) ++{ ++} +diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c +new file mode 100644 +index 0000000..ead6f87 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv40_mc.c +@@ -0,0 +1,38 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv40_mc_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ /* Power up everything, resetting each individual unit will ++ * be done later if needed. ++ */ ++ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ switch (dev_priv->chipset) { ++ case 0x44: ++ case 0x46: /* G72 */ ++ case 0x4e: ++ case 0x4c: /* C51_G7X */ ++ tmp = NV_READ(NV40_PFB_020C); ++ NV_WRITE(NV40_PMC_1700, tmp); ++ NV_WRITE(NV40_PMC_1704, 0); ++ NV_WRITE(NV40_PMC_1708, 0); ++ NV_WRITE(NV40_PMC_170C, tmp); ++ break; ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ ++void ++nv40_mc_takedown(struct drm_device *dev) ++{ ++} +diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c +new file mode 100644 +index 0000000..d681066 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv50_fifo.c +@@ -0,0 +1,343 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++struct nv50_fifo_priv { ++ struct nouveau_gpuobj_ref *thingo[2]; ++ int cur_thingo; ++}; ++ ++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) ++ ++static void ++nv50_fifo_init_thingo(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; ++ struct nouveau_gpuobj_ref *cur; ++ int i, nr; ++ ++ DRM_DEBUG("\n"); ++ ++ cur = priv->thingo[priv->cur_thingo]; ++ priv->cur_thingo = !priv->cur_thingo; ++ ++ /* We never schedule channel 0 or 127 */ ++ for (i = 1, nr = 0; i < 127; i++) { ++ if (dev_priv->fifos[i]) { ++ INSTANCE_WR(cur->gpuobj, nr++, i); ++ } ++ } ++ NV_WRITE(0x32f4, cur->instance >> 12); ++ NV_WRITE(0x32ec, nr); ++ NV_WRITE(0x2500, 0x101); ++} ++ ++static int ++nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[channel]; ++ uint32_t inst; ++ ++ DRM_DEBUG("ch%d\n", channel); ++ ++ if (!chan->ramfc) ++ return -EINVAL; ++ ++ if (IS_G80) inst = chan->ramfc->instance >> 12; ++ else inst = chan->ramfc->instance >> 8; ++ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), ++ inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); ++ ++ if (!nt) nv50_fifo_init_thingo(dev); ++ return 0; ++} ++ ++static void ++nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ ++ DRM_DEBUG("ch%d, nt=%d\n", channel, nt); ++ ++ if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; ++ else inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; ++ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst); ++ ++ if (!nt) nv50_fifo_init_thingo(dev); ++} ++ ++static void ++nv50_fifo_init_reset(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t pmc_e = 
NV_PMC_ENABLE_PFIFO; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | pmc_e); ++} ++ ++static void ++nv50_fifo_init_intr(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); ++ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); ++} ++ ++static void ++nv50_fifo_init_context_table(struct drm_device *dev) ++{ ++ int i; ++ ++ DRM_DEBUG("\n"); ++ ++ for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) ++ nv50_fifo_channel_disable(dev, i, 1); ++ nv50_fifo_init_thingo(dev); ++} ++ ++static void ++nv50_fifo_init_regs__nv(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(0x250c, 0x6f3cfc34); ++} ++ ++static void ++nv50_fifo_init_regs(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(0x2500, 0); ++ NV_WRITE(0x3250, 0); ++ NV_WRITE(0x3220, 0); ++ NV_WRITE(0x3204, 0); ++ NV_WRITE(0x3210, 0); ++ NV_WRITE(0x3270, 0); ++ ++ /* Enable dummy channels setup by nv50_instmem.c */ ++ nv50_fifo_channel_enable(dev, 0, 1); ++ nv50_fifo_channel_enable(dev, 127, 1); ++} ++ ++int ++nv50_fifo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_fifo_priv *priv; ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); ++ if (!priv) ++ return -ENOMEM; ++ dev_priv->Engine.fifo.priv = priv; ++ ++ nv50_fifo_init_reset(dev); ++ nv50_fifo_init_intr(dev); ++ ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, ++ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]); ++ if (ret) { ++ DRM_ERROR("error creating thingo0: %d\n", ret); ++ return ret; ++ } ++ ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, ++ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]); ++ if 
(ret) { ++ DRM_ERROR("error creating thingo1: %d\n", ret); ++ return ret; ++ } ++ ++ nv50_fifo_init_context_table(dev); ++ nv50_fifo_init_regs__nv(dev); ++ nv50_fifo_init_regs(dev); ++ ++ return 0; ++} ++ ++void ++nv50_fifo_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!priv) ++ return; ++ ++ nouveau_gpuobj_ref_del(dev, &priv->thingo[0]); ++ nouveau_gpuobj_ref_del(dev, &priv->thingo[1]); ++ ++ dev_priv->Engine.fifo.priv = NULL; ++ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); ++} ++ ++int ++nv50_fifo_channel_id(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & ++ NV50_PFIFO_CACHE1_PUSH1_CHID_MASK); ++} ++ ++int ++nv50_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramfc = NULL; ++ int ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ if (IS_G80) { ++ uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start; ++ uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start; ++ ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, vram_offset, ++ 0x100, NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, &ramfc, ++ &chan->ramfc); ++ if (ret) ++ return ret; ++ } else { ++ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ &chan->ramfc); ++ if (ret) ++ return ret; ++ ramfc = chan->ramfc->gpuobj; ++ } ++ ++ INSTANCE_WR(ramfc, 0x48/4, chan->pushbuf->instance >> 4); ++ INSTANCE_WR(ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); ++ INSTANCE_WR(ramfc, 0x3c/4, 0x000f0078); /* fetch? 
*/ ++ INSTANCE_WR(ramfc, 0x44/4, 0x2101ffff); ++ INSTANCE_WR(ramfc, 0x60/4, 0x7fffffff); ++ INSTANCE_WR(ramfc, 0x10/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x08/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x40/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x50/4, 0x2039b2e0); ++ INSTANCE_WR(ramfc, 0x54/4, 0x000f0000); ++ INSTANCE_WR(ramfc, 0x7c/4, 0x30000001); ++ INSTANCE_WR(ramfc, 0x78/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1); ++ ++ if (!IS_G80) { ++ INSTANCE_WR(chan->ramin->gpuobj, 0, chan->id); ++ INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance); ++ ++ INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */ ++ INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12); ++ } ++ ++ ret = nv50_fifo_channel_enable(dev, chan->id, 0); ++ if (ret) { ++ DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret); ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ nv50_fifo_channel_disable(dev, chan->id, 0); ++ ++ /* Dummy channel, also used on ch 127 */ ++ if (chan->id == 0) ++ nv50_fifo_channel_disable(dev, 127, 0); ++ ++ if ((NV_READ(NV03_PFIFO_CACHE1_PUSH1) & 0xffff) == chan->id) ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 127); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv50_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ /*XXX: incomplete, only touches the regs that NV does */ ++ ++ NV_WRITE(0x3244, 0); ++ NV_WRITE(0x3240, 0); ++ ++ NV_WRITE(0x3224, INSTANCE_RD(ramfc, 0x3c/4)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, INSTANCE_RD(ramfc, 0x48/4)); ++ NV_WRITE(0x3234, 
INSTANCE_RD(ramfc, 0x4c/4)); ++ NV_WRITE(0x3254, 1); ++ NV_WRITE(NV03_PFIFO_RAMHT, INSTANCE_RD(ramfc, 0x80/4)); ++ ++ if (!IS_G80) { ++ NV_WRITE(0x340c, INSTANCE_RD(ramfc, 0x88/4)); ++ NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4)); ++ } ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); ++ return 0; ++} ++ ++int ++nv50_fifo_save_context(struct nouveau_channel *chan) ++{ ++ DRM_DEBUG("ch%d\n", chan->id); ++ DRM_ERROR("stub!\n"); ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c +new file mode 100644 +index 0000000..35e123c +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv50_graph.c +@@ -0,0 +1,2192 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) ++ ++static void ++nv50_graph_init_reset(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21); ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | pmc_e); ++} ++ ++static void ++nv50_graph_init_intr(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff); ++ NV_WRITE(0x400138, 0xffffffff); ++ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff); ++} ++ ++static void ++nv50_graph_init_regs__nv(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(0x400804, 0xc0000000); ++ NV_WRITE(0x406800, 0xc0000000); ++ NV_WRITE(0x400c04, 0xc0000000); ++ NV_WRITE(0x401804, 0xc0000000); ++ NV_WRITE(0x405018, 0xc0000000); ++ NV_WRITE(0x402000, 0xc0000000); ++ ++ NV_WRITE(0x400108, 0xffffffff); ++ ++ NV_WRITE(0x400824, 0x00004000); ++ NV_WRITE(0x400500, 0x00010001); ++} ++ ++static void ++nv50_graph_init_regs(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */); ++} ++ ++static uint32_t nv84_ctx_voodoo[] = { ++ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, ++ 0x00700009, 0x0041634d, 0x00402944, 0x00402905, 0x0040290d, 0x00413e06, ++ 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000, ++ 0x00700081, 0x00600004, 0x0050004a, 0x00216f40, 0x00600007, 0x00c02801, ++ 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020, ++ 0x00600008, 0x0050004c, 
0x00600009, 0x00413e45, 0x0041594d, 0x0070009d, ++ 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008, ++ 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006, ++ 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216f40, 0x00600007, ++ 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080, ++ 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200480, 0x00600007, ++ 0x00300000, 0x00c000ff, 0x00c800ff, 0x00414907, 0x00202916, 0x008000ff, ++ 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, ++ 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302, ++ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f, ++ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, ++ 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407, ++ 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040798c, ++ 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04, ++ 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65, ++ 0x00131c80, 0x00121c84, 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00121cc4, ++ 0x00141ce0, 0x00111ce5, 0x00131f00, 0x00191f40, 0x0040a1e0, 0x002001ed, ++ 0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, ++ 0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300, ++ 0x00112302, 0x00122380, 0x0011238b, 0x00112394, 0x0011239c, 0x0040bee1, ++ 0x00200254, 0x00600006, 0x00200044, 0x00102480, 0x0040af0f, 0x0040af4b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040af8c, ++ 0x005000cb, 0x00000000, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500, ++ 0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, ++ 0x00122780, 0x0011278b, 0x00112794, 0x0011279c, 0x0040d1e2, 0x002002bb, ++ 0x00600006, 0x00200044, 0x00102880, 0x001128c6, 0x001528c9, 0x001928d0, 
++ 0x00122900, 0x00122903, 0x00162a00, 0x00122a07, 0x00112a80, 0x00112b00, ++ 0x00112b02, 0x00122b80, 0x00112b8b, 0x00112b94, 0x00112b9c, 0x0040eee3, ++ 0x00200322, 0x00600006, 0x00200044, 0x00102c80, 0x0040df0f, 0x0040df4b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040df8c, ++ 0x005000cb, 0x00000000, 0x00112cc6, 0x00152cc9, 0x00192cd0, 0x00122d00, ++ 0x00122d03, 0x00162e00, 0x00122e07, 0x00112e80, 0x00112f00, 0x00112f02, ++ 0x00122f80, 0x00112f8b, 0x00112f94, 0x00112f9c, 0x004101e4, 0x00200389, ++ 0x00600006, 0x00200044, 0x00103080, 0x001130c6, 0x001530c9, 0x001930d0, ++ 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, ++ 0x00113302, 0x00123380, 0x0011338b, 0x00113394, 0x0011339c, 0x00411ee5, ++ 0x002003f0, 0x00600006, 0x00200044, 0x00103480, 0x00410f0f, 0x00410f4b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x00410f8c, ++ 0x005000cb, 0x00000000, 0x001134c6, 0x001534c9, 0x001934d0, 0x00123500, ++ 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702, ++ 0x00123780, 0x0011378b, 0x00113794, 0x0011379c, 0x00000000, 0x0041250f, ++ 0x005000cb, 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x005000cb, ++ 0x00412887, 0x0060000a, 0x00000000, 0x00413700, 0x007000a0, 0x00700080, ++ 0x00200480, 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, ++ 0x00700000, 0x00200000, 0x00600006, 0x00111bfe, 0x0041594d, 0x00700000, ++ 0x00200000, 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, ++ 0x00700081, 0x00600004, 0x0050004a, 0x00414388, 0x0060000b, 0x00200000, ++ 0x00600006, 0x00700000, 0x0041590b, 0x00111bfd, 0x0040424d, 0x00202916, ++ 0x008000fd, 0x005000cb, 0x00c00002, 0x00200480, 0x00600007, 0x00200160, ++ 0x00800002, 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, ++ 0x00404e4d, 0x0060000b, 0x0041574d, 0x00700001, 0x005000cf, 0x00700003, ++ 0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006, ++ 0x0070000b, 0x0070000e, 0x0070001c, 
0x0060000c, ~0 ++}; ++ ++static uint32_t nv86_ctx_voodoo[] = { ++ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, ++ 0x00700009, 0x0040dd4d, 0x00402944, 0x00402905, 0x0040290d, 0x0040b906, ++ 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000, ++ 0x00700081, 0x00600004, 0x0050004a, 0x00216d80, 0x00600007, 0x00c02801, ++ 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020, ++ 0x00600008, 0x0050004c, 0x00600009, 0x0040b945, 0x0040d44d, 0x0070009d, ++ 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008, ++ 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006, ++ 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216d80, 0x00600007, ++ 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080, ++ 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200280, 0x00600007, ++ 0x00300000, 0x00c000ff, 0x00c800ff, 0x0040c407, 0x00202916, 0x008000ff, ++ 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, ++ 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302, ++ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f, ++ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, ++ 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407, ++ 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b, ++ 0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x0070008f, 0x0040798c, ++ 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04, ++ 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65, ++ 0x00131f00, 0x00191f40, 0x004099e0, 0x002001d9, 0x00600006, 0x00200044, ++ 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, 0x00122100, 0x00122103, ++ 0x00162200, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380, ++ 0x0011238b, 0x00112394, 0x0011239c, 0x00000000, 
0x0040a00f, 0x005000cb, ++ 0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x005000cb, 0x0040a387, ++ 0x0060000a, 0x00000000, 0x0040b200, 0x007000a0, 0x00700080, 0x00200280, ++ 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000, ++ 0x00200000, 0x00600006, 0x00111bfe, 0x0040d44d, 0x00700000, 0x00200000, ++ 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081, ++ 0x00600004, 0x0050004a, 0x0040be88, 0x0060000b, 0x00200000, 0x00600006, ++ 0x00700000, 0x0040d40b, 0x00111bfd, 0x0040424d, 0x00202916, 0x008000fd, ++ 0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200160, 0x00800002, ++ 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, 0x00404e4d, ++ 0x0060000b, 0x0040d24d, 0x00700001, 0x00700003, 0x0040d806, 0x0040d905, ++ 0x0060000d, 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e, ++ 0x0060000c, ~0 ++}; ++ ++static int ++nv50_graph_init_ctxctl(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t *voodoo = NULL; ++ ++ DRM_DEBUG("\n"); ++ ++ switch (dev_priv->chipset) { ++ case 0x84: ++ voodoo = nv84_ctx_voodoo; ++ break; ++ case 0x86: ++ voodoo = nv86_ctx_voodoo; ++ break; ++ default: ++ DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset); ++ return -EINVAL; ++ } ++ ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); ++ while (*voodoo != ~0) { ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo); ++ voodoo++; ++ } ++ ++ NV_WRITE(0x400320, 4); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0); ++ ++ return 0; ++} ++ ++int ++nv50_graph_init(struct drm_device *dev) ++{ ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ nv50_graph_init_reset(dev); ++ nv50_graph_init_intr(dev); ++ nv50_graph_init_regs__nv(dev); ++ nv50_graph_init_regs(dev); ++ ++ ret = nv50_graph_init_ctxctl(dev); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++void ++nv50_graph_takedown(struct drm_device *dev) ++{ ++ DRM_DEBUG("\n"); ++} ++ ++static void 
++nv86_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ctx = ref->gpuobj; ++ ++ INSTANCE_WR(ctx, 0x10C/4, 0x30); ++ INSTANCE_WR(ctx, 0x1D4/4, 0x3); ++ INSTANCE_WR(ctx, 0x1D8/4, 0x1000); ++ INSTANCE_WR(ctx, 0x218/4, 0xFE0C); ++ INSTANCE_WR(ctx, 0x22C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x258/4, 0x187); ++ INSTANCE_WR(ctx, 0x26C/4, 0x1018); ++ INSTANCE_WR(ctx, 0x270/4, 0xFF); ++ INSTANCE_WR(ctx, 0x2AC/4, 0x4); ++ INSTANCE_WR(ctx, 0x2B0/4, 0x44D00DF); ++ INSTANCE_WR(ctx, 0x2B8/4, 0x600); ++ INSTANCE_WR(ctx, 0x2D0/4, 0x1000000); ++ INSTANCE_WR(ctx, 0x2D4/4, 0xFF); ++ INSTANCE_WR(ctx, 0x2DC/4, 0x400); ++ INSTANCE_WR(ctx, 0x2F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F8/4, 0x80); ++ INSTANCE_WR(ctx, 0x2FC/4, 0x4); ++ INSTANCE_WR(ctx, 0x318/4, 0x2); ++ INSTANCE_WR(ctx, 0x31C/4, 0x1); ++ INSTANCE_WR(ctx, 0x328/4, 0x1); ++ INSTANCE_WR(ctx, 0x32C/4, 0x100); ++ INSTANCE_WR(ctx, 0x344/4, 0x2); ++ INSTANCE_WR(ctx, 0x348/4, 0x1); ++ INSTANCE_WR(ctx, 0x34C/4, 0x1); ++ INSTANCE_WR(ctx, 0x35C/4, 0x1); ++ INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x364/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x36C/4, 0x1); ++ INSTANCE_WR(ctx, 0x370/4, 0x1); ++ INSTANCE_WR(ctx, 0x378/4, 0x1); ++ INSTANCE_WR(ctx, 0x37C/4, 0x1); ++ INSTANCE_WR(ctx, 0x380/4, 0x1); ++ INSTANCE_WR(ctx, 0x384/4, 0x4); ++ INSTANCE_WR(ctx, 0x388/4, 0x1); ++ INSTANCE_WR(ctx, 0x38C/4, 0x1); ++ INSTANCE_WR(ctx, 0x390/4, 0x1); ++ INSTANCE_WR(ctx, 0x394/4, 0x7); ++ INSTANCE_WR(ctx, 0x398/4, 0x1); ++ INSTANCE_WR(ctx, 0x39C/4, 0x7); ++ INSTANCE_WR(ctx, 0x3A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x3A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x3A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x3BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x3C0/4, 0x100); ++ INSTANCE_WR(ctx, 0x3C8/4, 0x1); ++ INSTANCE_WR(ctx, 0x3D4/4, 0x100); ++ INSTANCE_WR(ctx, 0x3D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x3DC/4, 0x100); ++ INSTANCE_WR(ctx, 0x3E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x3F0/4, 0x100); ++ 
INSTANCE_WR(ctx, 0x404/4, 0x4); ++ INSTANCE_WR(ctx, 0x408/4, 0x70); ++ INSTANCE_WR(ctx, 0x40C/4, 0x80); ++ INSTANCE_WR(ctx, 0x420/4, 0xC); ++ INSTANCE_WR(ctx, 0x428/4, 0x8); ++ INSTANCE_WR(ctx, 0x42C/4, 0x14); ++ INSTANCE_WR(ctx, 0x434/4, 0x29); ++ INSTANCE_WR(ctx, 0x438/4, 0x27); ++ INSTANCE_WR(ctx, 0x43C/4, 0x26); ++ INSTANCE_WR(ctx, 0x440/4, 0x8); ++ INSTANCE_WR(ctx, 0x444/4, 0x4); ++ INSTANCE_WR(ctx, 0x448/4, 0x27); ++ INSTANCE_WR(ctx, 0x454/4, 0x1); ++ INSTANCE_WR(ctx, 0x458/4, 0x2); ++ INSTANCE_WR(ctx, 0x45C/4, 0x3); ++ INSTANCE_WR(ctx, 0x460/4, 0x4); ++ INSTANCE_WR(ctx, 0x464/4, 0x5); ++ INSTANCE_WR(ctx, 0x468/4, 0x6); ++ INSTANCE_WR(ctx, 0x46C/4, 0x7); ++ INSTANCE_WR(ctx, 0x470/4, 0x1); ++ INSTANCE_WR(ctx, 0x4B4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x4E4/4, 0x80); ++ INSTANCE_WR(ctx, 0x4E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x4EC/4, 0x4); ++ INSTANCE_WR(ctx, 0x4F0/4, 0x3); ++ INSTANCE_WR(ctx, 0x4F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x500/4, 0x12); ++ INSTANCE_WR(ctx, 0x504/4, 0x10); ++ INSTANCE_WR(ctx, 0x508/4, 0xC); ++ INSTANCE_WR(ctx, 0x50C/4, 0x1); ++ INSTANCE_WR(ctx, 0x51C/4, 0x4); ++ INSTANCE_WR(ctx, 0x520/4, 0x2); ++ INSTANCE_WR(ctx, 0x524/4, 0x4); ++ INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x534/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x55C/4, 0x4); ++ INSTANCE_WR(ctx, 0x560/4, 0x14); ++ INSTANCE_WR(ctx, 0x564/4, 0x1); ++ INSTANCE_WR(ctx, 0x570/4, 0x2); ++ INSTANCE_WR(ctx, 0x57C/4, 0x1); ++ INSTANCE_WR(ctx, 0x584/4, 0x2); ++ INSTANCE_WR(ctx, 0x588/4, 0x1000); ++ INSTANCE_WR(ctx, 0x58C/4, 0xE00); ++ INSTANCE_WR(ctx, 0x590/4, 0x1000); ++ INSTANCE_WR(ctx, 0x594/4, 0x1E00); ++ INSTANCE_WR(ctx, 0x59C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5BC/4, 0x200); ++ INSTANCE_WR(ctx, 0x5C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5C8/4, 0x70); ++ INSTANCE_WR(ctx, 0x5CC/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC/4, 0x70); ++ 
INSTANCE_WR(ctx, 0x5E0/4, 0x80); ++ INSTANCE_WR(ctx, 0x5F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5F4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x5FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x60C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x614/4, 0x2); ++ INSTANCE_WR(ctx, 0x61C/4, 0x1); ++ INSTANCE_WR(ctx, 0x624/4, 0x1); ++ INSTANCE_WR(ctx, 0x62C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x630/4, 0xCF); ++ INSTANCE_WR(ctx, 0x634/4, 0x1); ++ INSTANCE_WR(ctx, 0x63C/4, 0xF80); ++ INSTANCE_WR(ctx, 0x684/4, 0x7F0080); ++ INSTANCE_WR(ctx, 0x6C0/4, 0x7F0080); ++ INSTANCE_WR(ctx, 0x6E4/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x6E8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x6F0/4, 0x1000); ++ INSTANCE_WR(ctx, 0x6F4/4, 0x1F); ++ INSTANCE_WR(ctx, 0x6F8/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x6FC/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x700/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x70C/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x710/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x718/4, 0x1000); ++ INSTANCE_WR(ctx, 0x71C/4, 0x1F); ++ INSTANCE_WR(ctx, 0x720/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x724/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x728/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x734/4, 0x10040); ++ INSTANCE_WR(ctx, 0x73C/4, 0x22); ++ INSTANCE_WR(ctx, 0x748/4, 0x10040); ++ INSTANCE_WR(ctx, 0x74C/4, 0x22); ++ INSTANCE_WR(ctx, 0x764/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x768/4, 0x160000); ++ INSTANCE_WR(ctx, 0x76C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x77C/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x780/4, 0x8C0000); ++ INSTANCE_WR(ctx, 0x7A4/4, 0x10401); ++ INSTANCE_WR(ctx, 0x7AC/4, 0x78); ++ INSTANCE_WR(ctx, 0x7B4/4, 0xBF); ++ INSTANCE_WR(ctx, 0x7BC/4, 0x1210); ++ INSTANCE_WR(ctx, 0x7C0/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x7E4/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7E8/4, 0x160000); ++ INSTANCE_WR(ctx, 0x7EC/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7FC/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x800/4, 0x8C0000); ++ INSTANCE_WR(ctx, 0x824/4, 0x10401); ++ INSTANCE_WR(ctx, 0x82C/4, 0x78); ++ INSTANCE_WR(ctx, 0x834/4, 0xBF); ++ INSTANCE_WR(ctx, 0x83C/4, 0x1210); ++ INSTANCE_WR(ctx, 0x840/4, 
0x8000080); ++ INSTANCE_WR(ctx, 0x868/4, 0x27070); ++ INSTANCE_WR(ctx, 0x874/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x88C/4, 0x120407); ++ INSTANCE_WR(ctx, 0x890/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x894/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x898/4, 0x30201); ++ INSTANCE_WR(ctx, 0x8B4/4, 0x40); ++ INSTANCE_WR(ctx, 0x8B8/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x8BC/4, 0x141210); ++ INSTANCE_WR(ctx, 0x8C0/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x8C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x8C8/4, 0x3); ++ INSTANCE_WR(ctx, 0x8D4/4, 0x39E00); ++ INSTANCE_WR(ctx, 0x8D8/4, 0x100); ++ INSTANCE_WR(ctx, 0x8DC/4, 0x3800); ++ INSTANCE_WR(ctx, 0x8E0/4, 0x404040); ++ INSTANCE_WR(ctx, 0x8E4/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x8EC/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x8F0/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x7BA0/4, 0x21); ++ INSTANCE_WR(ctx, 0x7BC0/4, 0x1); ++ INSTANCE_WR(ctx, 0x7BE0/4, 0x2); ++ INSTANCE_WR(ctx, 0x7C00/4, 0x100); ++ INSTANCE_WR(ctx, 0x7C20/4, 0x100); ++ INSTANCE_WR(ctx, 0x7C40/4, 0x1); ++ INSTANCE_WR(ctx, 0x7CA0/4, 0x1); ++ INSTANCE_WR(ctx, 0x7CC0/4, 0x2); ++ INSTANCE_WR(ctx, 0x7CE0/4, 0x100); ++ INSTANCE_WR(ctx, 0x7D00/4, 0x100); ++ INSTANCE_WR(ctx, 0x7D20/4, 0x1); ++ INSTANCE_WR(ctx, 0x11640/4, 0x4); ++ INSTANCE_WR(ctx, 0x11660/4, 0x4); ++ INSTANCE_WR(ctx, 0x49FE0/4, 0x4); ++ INSTANCE_WR(ctx, 0x4A000/4, 0x4); ++ INSTANCE_WR(ctx, 0x4A020/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x4A040/4, 0x3); ++ INSTANCE_WR(ctx, 0x4A080/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x4A0C0/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x4A0E0/4, 0x1); ++ INSTANCE_WR(ctx, 0x4A100/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x4A160/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x4A180/4, 0x27); ++ INSTANCE_WR(ctx, 0x4A1E0/4, 0x1); ++ INSTANCE_WR(ctx, 0x51A20/4, 0x1); ++ INSTANCE_WR(ctx, 0x51D00/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x51EA0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x51EC0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x51F00/4, 0x80); ++ INSTANCE_WR(ctx, 0x51F80/4, 0x80); ++ INSTANCE_WR(ctx, 0x51FC0/4, 0x3F); ++ INSTANCE_WR(ctx, 0x52120/4, 0x2); ++ INSTANCE_WR(ctx, 
0x52140/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x52160/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x52280/4, 0x4); ++ INSTANCE_WR(ctx, 0x52300/4, 0x4); ++ INSTANCE_WR(ctx, 0x52540/4, 0x1); ++ INSTANCE_WR(ctx, 0x52560/4, 0x1001); ++ INSTANCE_WR(ctx, 0x52580/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x525A0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x525C0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x525E0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x52A00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52AA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52AC0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52AE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52BA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52BC0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52BE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52C00/4, 0x10); ++ INSTANCE_WR(ctx, 0x52C60/4, 0x3); ++ INSTANCE_WR(ctx, 0xA84/4, 0xF); ++ INSTANCE_WR(ctx, 0xB24/4, 0x20); ++ INSTANCE_WR(ctx, 0xD04/4, 0x1A); ++ INSTANCE_WR(ctx, 0xEC4/4, 0x4); ++ INSTANCE_WR(ctx, 0xEE4/4, 0x4); ++ INSTANCE_WR(ctx, 0xF24/4, 0x4); ++ INSTANCE_WR(ctx, 0xF44/4, 0x8); ++ INSTANCE_WR(ctx, 0xF84/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x1124/4, 0xF); ++ INSTANCE_WR(ctx, 0x3604/4, 0xF); ++ INSTANCE_WR(ctx, 0x3644/4, 0x1); ++ INSTANCE_WR(ctx, 0x41A4/4, 0xF); ++ INSTANCE_WR(ctx, 0x14844/4, 0xF); ++ INSTANCE_WR(ctx, 0x14AE4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14B04/4, 0x100); ++ INSTANCE_WR(ctx, 0x14B24/4, 0x100); ++ INSTANCE_WR(ctx, 0x14B44/4, 0x11); ++ INSTANCE_WR(ctx, 0x14B84/4, 0x8); ++ INSTANCE_WR(ctx, 0x14C44/4, 0x1); ++ INSTANCE_WR(ctx, 0x14C84/4, 0x1); ++ INSTANCE_WR(ctx, 0x14CA4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14CC4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14CE4/4, 0xCF); ++ 
INSTANCE_WR(ctx, 0x14D04/4, 0x2); ++ INSTANCE_WR(ctx, 0x14DE4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14E24/4, 0x1); ++ INSTANCE_WR(ctx, 0x14E44/4, 0x1); ++ INSTANCE_WR(ctx, 0x14E64/4, 0x1); ++ INSTANCE_WR(ctx, 0x14F04/4, 0x4); ++ INSTANCE_WR(ctx, 0x14F44/4, 0x1); ++ INSTANCE_WR(ctx, 0x14F64/4, 0x15); ++ INSTANCE_WR(ctx, 0x14FE4/4, 0x4444480); ++ INSTANCE_WR(ctx, 0x15764/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x15804/4, 0x100); ++ INSTANCE_WR(ctx, 0x15864/4, 0x10001); ++ INSTANCE_WR(ctx, 0x158A4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x158C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x158E4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x15904/4, 0x1); ++ INSTANCE_WR(ctx, 0x15924/4, 0x4); ++ INSTANCE_WR(ctx, 0x15944/4, 0x2); ++ INSTANCE_WR(ctx, 0x166C4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x166E4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x16784/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16904/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x16924/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x15948/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x159A8/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x15B88/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15C68/4, 0x4); ++ INSTANCE_WR(ctx, 0x15C88/4, 0x1A); ++ INSTANCE_WR(ctx, 0x15CE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x15F48/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x16028/4, 0xF); ++ INSTANCE_WR(ctx, 0x16128/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16148/4, 0x11); ++ INSTANCE_WR(ctx, 0x16348/4, 0x4); ++ INSTANCE_WR(ctx, 0x163E8/4, 0x2); ++ INSTANCE_WR(ctx, 0x16408/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x16428/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x164A8/4, 0x5); ++ INSTANCE_WR(ctx, 0x164C8/4, 0x52); ++ INSTANCE_WR(ctx, 0x16568/4, 0x1); ++ INSTANCE_WR(ctx, 0x16788/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x167A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x167C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x167E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16808/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16828/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16848/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16868/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16888/4, 0x3F800000); ++ INSTANCE_WR(ctx, 
0x168A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16908/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16928/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16948/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16968/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16988/4, 0x10); ++ INSTANCE_WR(ctx, 0x16E68/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x16E88/4, 0x5); ++ INSTANCE_WR(ctx, 0x16EE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x16F28/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16F48/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16F68/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16F88/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16FA8/4, 0x3); ++ INSTANCE_WR(ctx, 0x173A8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x173C8/4, 0x1A); ++ INSTANCE_WR(ctx, 0x17408/4, 0x3); ++ INSTANCE_WR(ctx, 0x178E8/4, 0x102); ++ INSTANCE_WR(ctx, 0x17928/4, 0x4); ++ INSTANCE_WR(ctx, 0x17948/4, 0x4); ++ INSTANCE_WR(ctx, 0x17968/4, 0x4); ++ INSTANCE_WR(ctx, 0x17988/4, 0x4); ++ INSTANCE_WR(ctx, 0x179A8/4, 0x4); ++ INSTANCE_WR(ctx, 0x179C8/4, 0x4); ++ INSTANCE_WR(ctx, 0x17A08/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17A48/4, 0x102); ++ INSTANCE_WR(ctx, 0x17B88/4, 0x4); ++ INSTANCE_WR(ctx, 0x17BA8/4, 0x4); ++ INSTANCE_WR(ctx, 0x17BC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x17BE8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18228/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18288/4, 0x804); ++ INSTANCE_WR(ctx, 0x182C8/4, 0x4); ++ INSTANCE_WR(ctx, 0x182E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18308/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x18348/4, 0x4); ++ INSTANCE_WR(ctx, 0x18368/4, 0x4); ++ INSTANCE_WR(ctx, 0x183A8/4, 0x10); ++ INSTANCE_WR(ctx, 0x18448/4, 0x804); ++ INSTANCE_WR(ctx, 0x18468/4, 0x1); ++ INSTANCE_WR(ctx, 0x18488/4, 0x1A); ++ INSTANCE_WR(ctx, 0x184A8/4, 0x7F); ++ INSTANCE_WR(ctx, 0x184E8/4, 0x1); ++ INSTANCE_WR(ctx, 0x18508/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18548/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x18568/4, 0x4); ++ INSTANCE_WR(ctx, 0x18588/4, 0x4); ++ INSTANCE_WR(ctx, 0x185C8/4, 0x10); ++ INSTANCE_WR(ctx, 0x18648/4, 0x1); ++ INSTANCE_WR(ctx, 
0x18668/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x18748/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x18768/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18E88/4, 0x1); ++ INSTANCE_WR(ctx, 0x18EE8/4, 0x10); ++ INSTANCE_WR(ctx, 0x19608/4, 0x88); ++ INSTANCE_WR(ctx, 0x19628/4, 0x88); ++ INSTANCE_WR(ctx, 0x19688/4, 0x4); ++ INSTANCE_WR(ctx, 0x19968/4, 0x26); ++ INSTANCE_WR(ctx, 0x199C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x19A48/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19A68/4, 0x10); ++ INSTANCE_WR(ctx, 0x19F88/4, 0x52); ++ INSTANCE_WR(ctx, 0x19FC8/4, 0x26); ++ INSTANCE_WR(ctx, 0x1A008/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A028/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A068/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1A0C8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x1A108/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A128/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A168/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A188/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A1A8/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x1A1E8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x24A48/4, 0x4); ++ INSTANCE_WR(ctx, 0x24A68/4, 0x4); ++ INSTANCE_WR(ctx, 0x24AA8/4, 0x80); ++ INSTANCE_WR(ctx, 0x24AC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x24AE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x24B28/4, 0x27); ++ INSTANCE_WR(ctx, 0x24B68/4, 0x26); ++ INSTANCE_WR(ctx, 0x24BE8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C08/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C28/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C48/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C68/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C88/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24CA8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24CC8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24CE8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D08/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D28/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D48/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D68/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D88/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24DA8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24DC8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x25268/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x25288/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x252E8/4, 0x1FE21); ++ 
INSTANCE_WR(ctx, 0xB0C/4, 0x2); ++ INSTANCE_WR(ctx, 0xB4C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0xCEC/4, 0x1); ++ INSTANCE_WR(ctx, 0xD0C/4, 0x10); ++ INSTANCE_WR(ctx, 0xD6C/4, 0x1); ++ INSTANCE_WR(ctx, 0xE0C/4, 0x4); ++ INSTANCE_WR(ctx, 0xE2C/4, 0x400); ++ INSTANCE_WR(ctx, 0xE4C/4, 0x300); ++ INSTANCE_WR(ctx, 0xE6C/4, 0x1001); ++ INSTANCE_WR(ctx, 0xE8C/4, 0x15); ++ INSTANCE_WR(ctx, 0xF4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x106C/4, 0x1); ++ INSTANCE_WR(ctx, 0x108C/4, 0x10); ++ INSTANCE_WR(ctx, 0x10CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x134C/4, 0x10); ++ INSTANCE_WR(ctx, 0x156C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x158C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x160C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x162C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x164C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x166C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x170C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x172C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x174C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x1A8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x1ACC/4, 0x3F); ++ INSTANCE_WR(ctx, 0x1BAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1BEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1C2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1DCC/4, 0x11); ++ INSTANCE_WR(ctx, 0x1ECC/4, 0xF); ++ INSTANCE_WR(ctx, 0x1FCC/4, 0x11); ++ INSTANCE_WR(ctx, 0x20AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x20CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x20EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x210C/4, 0x2); ++ INSTANCE_WR(ctx, 0x212C/4, 0x1); ++ INSTANCE_WR(ctx, 0x214C/4, 0x2); ++ INSTANCE_WR(ctx, 0x216C/4, 0x1); ++ INSTANCE_WR(ctx, 0x21AC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x21EC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x24AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x24CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x24EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x250C/4, 
0x1); ++ INSTANCE_WR(ctx, 0x252C/4, 0x2); ++ INSTANCE_WR(ctx, 0x254C/4, 0x1); ++ INSTANCE_WR(ctx, 0x256C/4, 0x1); ++ INSTANCE_WR(ctx, 0x25EC/4, 0x11); ++ INSTANCE_WR(ctx, 0x260C/4, 0x1); ++ INSTANCE_WR(ctx, 0x328C/4, 0x2); ++ INSTANCE_WR(ctx, 0x32CC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x346C/4, 0x1); ++ INSTANCE_WR(ctx, 0x348C/4, 0x10); ++ INSTANCE_WR(ctx, 0x34EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x358C/4, 0x4); ++ INSTANCE_WR(ctx, 0x35AC/4, 0x400); ++ INSTANCE_WR(ctx, 0x35CC/4, 0x300); ++ INSTANCE_WR(ctx, 0x35EC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x360C/4, 0x15); ++ INSTANCE_WR(ctx, 0x36CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x37EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x380C/4, 0x10); ++ INSTANCE_WR(ctx, 0x384C/4, 0x1); ++ INSTANCE_WR(ctx, 0x3ACC/4, 0x10); ++ INSTANCE_WR(ctx, 0x3CEC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D0C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D2C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D4C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D6C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D8C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3DAC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3DCC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3DEC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E0C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E2C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E4C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E6C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E8C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3EAC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3ECC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x420C/4, 0x10); ++ INSTANCE_WR(ctx, 0x424C/4, 0x3F); ++ INSTANCE_WR(ctx, 0x432C/4, 0x1); ++ INSTANCE_WR(ctx, 0x436C/4, 0x1); ++ INSTANCE_WR(ctx, 0x43AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x454C/4, 0x11); ++ INSTANCE_WR(ctx, 0x464C/4, 0xF); ++ INSTANCE_WR(ctx, 0x474C/4, 0x11); ++ INSTANCE_WR(ctx, 0x482C/4, 0x1); ++ INSTANCE_WR(ctx, 0x484C/4, 0x1); ++ INSTANCE_WR(ctx, 0x486C/4, 0x1); ++ INSTANCE_WR(ctx, 0x488C/4, 0x2); ++ INSTANCE_WR(ctx, 0x48AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x48CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x48EC/4, 0x1); ++ INSTANCE_WR(ctx, 
0x492C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x496C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x4C2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4C4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x4C6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4C8C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4CAC/4, 0x2); ++ INSTANCE_WR(ctx, 0x4CCC/4, 0x1); ++ INSTANCE_WR(ctx, 0x4CEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x4D6C/4, 0x11); ++ INSTANCE_WR(ctx, 0x4D8C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA30/4, 0x4); ++ INSTANCE_WR(ctx, 0xCF0/4, 0x4); ++ INSTANCE_WR(ctx, 0xD10/4, 0x4); ++ INSTANCE_WR(ctx, 0xD30/4, 0x608080); ++ INSTANCE_WR(ctx, 0xDD0/4, 0x4); ++ INSTANCE_WR(ctx, 0xE30/4, 0x4); ++ INSTANCE_WR(ctx, 0xE50/4, 0x4); ++ INSTANCE_WR(ctx, 0xE70/4, 0x80); ++ INSTANCE_WR(ctx, 0xE90/4, 0x1E00); ++ INSTANCE_WR(ctx, 0xEB0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1350/4, 0x4); ++ INSTANCE_WR(ctx, 0x1370/4, 0x80); ++ INSTANCE_WR(ctx, 0x1390/4, 0x4); ++ INSTANCE_WR(ctx, 0x13B0/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x13D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x13F0/4, 0x1E00); ++ INSTANCE_WR(ctx, 0x1410/4, 0x4); ++ INSTANCE_WR(ctx, 0x14B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x14D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x1550/4, 0x4); ++ INSTANCE_WR(ctx, 0x159F0/4, 0x4); ++ INSTANCE_WR(ctx, 0x15A10/4, 0x3); ++ INSTANCE_WR(ctx, 0x15C50/4, 0xF); ++ INSTANCE_WR(ctx, 0x15DD0/4, 0x4); ++ INSTANCE_WR(ctx, 0x15DF0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15E10/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15E30/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15E50/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15F70/4, 0x1); ++ INSTANCE_WR(ctx, 0x15FF0/4, 0x1); ++ INSTANCE_WR(ctx, 0x160B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16250/4, 0x1); ++ INSTANCE_WR(ctx, 0x16270/4, 0x1); ++ INSTANCE_WR(ctx, 0x16290/4, 0x2); ++ INSTANCE_WR(ctx, 0x162B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x162D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x162F0/4, 0x2); ++ INSTANCE_WR(ctx, 0x16310/4, 0x1); ++ INSTANCE_WR(ctx, 0x16350/4, 0x11); ++ INSTANCE_WR(ctx, 0x16450/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x164B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x16530/4, 0x11); ++ INSTANCE_WR(ctx, 0x16550/4, 0x1); ++ 
INSTANCE_WR(ctx, 0x16590/4, 0xCF); ++ INSTANCE_WR(ctx, 0x165B0/4, 0xCF); ++ INSTANCE_WR(ctx, 0x165D0/4, 0xCF); ++ INSTANCE_WR(ctx, 0x16730/4, 0x1); ++ INSTANCE_WR(ctx, 0x16750/4, 0x1); ++ INSTANCE_WR(ctx, 0x16770/4, 0x2); ++ INSTANCE_WR(ctx, 0x16790/4, 0x1); ++ INSTANCE_WR(ctx, 0x167B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x167D0/4, 0x2); ++ INSTANCE_WR(ctx, 0x167F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16830/4, 0x1); ++ INSTANCE_WR(ctx, 0x16850/4, 0x1); ++ INSTANCE_WR(ctx, 0x16870/4, 0x1); ++ INSTANCE_WR(ctx, 0x16890/4, 0x1); ++ INSTANCE_WR(ctx, 0x168B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x168D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x168F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16910/4, 0x1); ++ INSTANCE_WR(ctx, 0x16930/4, 0x11); ++ INSTANCE_WR(ctx, 0x16A30/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16A50/4, 0xF); ++ INSTANCE_WR(ctx, 0x16B50/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x16BB0/4, 0x11); ++ INSTANCE_WR(ctx, 0x16BD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16C50/4, 0x4); ++ INSTANCE_WR(ctx, 0x16D10/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DB0/4, 0x11); ++ INSTANCE_WR(ctx, 0x16EB0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F30/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F50/4, 0x1); ++ INSTANCE_WR(ctx, 0x16F90/4, 0x1); ++ INSTANCE_WR(ctx, 0x16FD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17010/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17050/4, 0x1); ++ INSTANCE_WR(ctx, 0x17090/4, 0x1); ++ INSTANCE_WR(ctx, 0x175F0/4, 0x8); ++ INSTANCE_WR(ctx, 0x17610/4, 0x8); ++ INSTANCE_WR(ctx, 0x17630/4, 0x8); ++ INSTANCE_WR(ctx, 0x17650/4, 0x8); ++ INSTANCE_WR(ctx, 0x17670/4, 0x8); ++ INSTANCE_WR(ctx, 0x17690/4, 0x8); ++ INSTANCE_WR(ctx, 0x176B0/4, 0x8); ++ INSTANCE_WR(ctx, 0x176D0/4, 0x8); ++ INSTANCE_WR(ctx, 0x176F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x177F0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17810/4, 0x400); ++ INSTANCE_WR(ctx, 0x17830/4, 0x400); ++ INSTANCE_WR(ctx, 0x17850/4, 0x400); ++ INSTANCE_WR(ctx, 0x17870/4, 0x400); ++ INSTANCE_WR(ctx, 0x17890/4, 0x400); ++ INSTANCE_WR(ctx, 0x178B0/4, 0x400); ++ INSTANCE_WR(ctx, 0x178D0/4, 0x400); ++ INSTANCE_WR(ctx, 0x178F0/4, 
0x400); ++ INSTANCE_WR(ctx, 0x17910/4, 0x300); ++ INSTANCE_WR(ctx, 0x17930/4, 0x300); ++ INSTANCE_WR(ctx, 0x17950/4, 0x300); ++ INSTANCE_WR(ctx, 0x17970/4, 0x300); ++ INSTANCE_WR(ctx, 0x17990/4, 0x300); ++ INSTANCE_WR(ctx, 0x179B0/4, 0x300); ++ INSTANCE_WR(ctx, 0x179D0/4, 0x300); ++ INSTANCE_WR(ctx, 0x179F0/4, 0x300); ++ INSTANCE_WR(ctx, 0x17A10/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A30/4, 0xF); ++ INSTANCE_WR(ctx, 0x17B30/4, 0x20); ++ INSTANCE_WR(ctx, 0x17B50/4, 0x11); ++ INSTANCE_WR(ctx, 0x17B70/4, 0x100); ++ INSTANCE_WR(ctx, 0x17BB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17C10/4, 0x40); ++ INSTANCE_WR(ctx, 0x17C30/4, 0x100); ++ INSTANCE_WR(ctx, 0x17C70/4, 0x3); ++ INSTANCE_WR(ctx, 0x17D10/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x17D90/4, 0x2); ++ INSTANCE_WR(ctx, 0x17DB0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17EF0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17F90/4, 0x4); ++ INSTANCE_WR(ctx, 0x17FD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17FF0/4, 0x400); ++ INSTANCE_WR(ctx, 0x18010/4, 0x300); ++ INSTANCE_WR(ctx, 0x18030/4, 0x1001); ++ INSTANCE_WR(ctx, 0x180B0/4, 0x11); ++ INSTANCE_WR(ctx, 0x181B0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x181D0/4, 0xF); ++ INSTANCE_WR(ctx, 0x184D0/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x18550/4, 0x11); ++ INSTANCE_WR(ctx, 0x185B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x185F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x18610/4, 0x1); ++ INSTANCE_WR(ctx, 0x18690/4, 0x1); ++ INSTANCE_WR(ctx, 0x18730/4, 0x1); ++ INSTANCE_WR(ctx, 0x18770/4, 0x1); ++ INSTANCE_WR(ctx, 0x187F0/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x18830/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x18850/4, 0x40); ++ INSTANCE_WR(ctx, 0x18870/4, 0x100); ++ INSTANCE_WR(ctx, 0x18890/4, 0x10100); ++ INSTANCE_WR(ctx, 0x188B0/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x18B10/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x18B30/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x18B50/4, 0x1); ++ INSTANCE_WR(ctx, 0x18B90/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x18BB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x18C10/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x18D30/4, 0x1); ++ INSTANCE_WR(ctx, 0x18D70/4, 0x1); 
++ INSTANCE_WR(ctx, 0x18D90/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x18DB0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x18DD0/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x18DF0/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x18E30/4, 0x1A); ++} ++ ++ ++static void ++nv84_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ctx = ref->gpuobj; ++ ++ INSTANCE_WR(ctx, 0x0010c/4, 0x00000030); ++ INSTANCE_WR(ctx, 0x00130/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x001d4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x001d8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00218/4, 0x0000fe0c); ++ INSTANCE_WR(ctx, 0x0022c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00258/4, 0x00000187); ++ INSTANCE_WR(ctx, 0x0026c/4, 0x00001018); ++ INSTANCE_WR(ctx, 0x00270/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002ac/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x002b0/4, 0x044d00df); ++ INSTANCE_WR(ctx, 0x002b8/4, 0x00000600); ++ INSTANCE_WR(ctx, 0x002d0/4, 0x01000000); ++ INSTANCE_WR(ctx, 0x002d4/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002dc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x002f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x002f8/4, 0x000e0080); ++ INSTANCE_WR(ctx, 0x002fc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00318/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0031c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00328/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0032c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00344/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00348/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0034c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00360/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x00364/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x0036c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00378/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0037c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00380/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00384/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x0038c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00390/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00394/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00398/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x003a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003a8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003c0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003c8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003dc/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00404/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00408/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x0040c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00420/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x00428/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0042c/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x00434/4, 0x00000029); ++ INSTANCE_WR(ctx, 0x00438/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x00440/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00444/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00448/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x00454/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0045c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00464/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00468/4, 0x00000006); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004b4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x004e4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004ec/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004f0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x004f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00500/4, 0x00000012); ++ INSTANCE_WR(ctx, 0x00504/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00508/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x0050c/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00520/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00524/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00530/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x00534/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00560/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x00564/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00570/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0057c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00588/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0058c/4, 0x00000e00); ++ INSTANCE_WR(ctx, 0x00590/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x0059c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x00000200); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x005e0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x005f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x005fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0060c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00614/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0061c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00624/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00630/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00634/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0063c/4, 0x00000f80); ++ INSTANCE_WR(ctx, 0x00684/4, 0x007f0080); ++ INSTANCE_WR(ctx, 0x006c0/4, 0x007f0080); ++ ++ INSTANCE_WR(ctx, 0x006e4/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x006e8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x006f0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x006f4/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x006f8/4, 0x027c10fa); ++ 
INSTANCE_WR(ctx, 0x006fc/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00700/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x0070c/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x00710/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00718/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0071c/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00720/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00724/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00728/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x00734/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x00738/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00740/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00744/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00748/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x0074c/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00750/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x0075c/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x00760/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00768/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0076c/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00770/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00774/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00778/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x00784/4, 0x00010040); ++ INSTANCE_WR(ctx, 0x0078c/4, 0x00000022); ++ INSTANCE_WR(ctx, 0x00798/4, 0x00010040); ++ INSTANCE_WR(ctx, 0x0079c/4, 0x00000022); ++ ++ INSTANCE_WR(ctx, 0x007b4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007b8/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x007bc/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x007d0/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x007f4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x007fc/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00804/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x0080c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00810/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00834/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00838/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x0083c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0084c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00850/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00874/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x0087c/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00884/4, 0x000000bf); ++ 
INSTANCE_WR(ctx, 0x0088c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00890/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x008b8/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x008c4/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x008dc/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00904/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00908/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x0090c/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00910/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00914/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00918/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00924/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00928/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00930/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00934/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x0093c/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00940/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00950/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00954/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00958/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00968/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x0096c/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00990/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00998/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x009a8/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x009d0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x009d4/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x009d8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x009e8/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x009ec/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00a18/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00a20/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00a28/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00a2c/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00a60/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00a78/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00a7c/4, 0x05091507); ++ INSTANCE_WR(ctx, 
0x00a80/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00a84/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00aa4/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00aa8/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00aac/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00ab0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00ab4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00ac0/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00ac4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00ac8/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00acc/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00ad0/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00ad8/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00adc/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00aec/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00af0/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00af4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00b04/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00b08/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00b2c/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00b34/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00b44/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00b48/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00b6c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00b70/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00b74/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00b84/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00b88/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00bbc/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00bc4/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00bc8/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00c14/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00c18/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00c20/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00c3c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00c40/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00c44/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00c48/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00c4c/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x00c50/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00c5c/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00c60/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00c64/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00c68/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00c6c/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00c74/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00c78/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00c88/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00c90/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00ca0/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00ca4/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00cc8/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00cd0/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00ce0/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00ce4/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00d08/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00d0c/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00d10/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00d20/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00d24/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00d48/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00d50/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00d58/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00d60/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00d8c/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00d98/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00db0/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00db4/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00db8/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00dbc/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00dd8/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00ddc/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00de0/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00de4/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00de8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00dec/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00df8/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00dfc/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00e00/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00e04/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00e08/4, 0x0000ff0a); ++ 
INSTANCE_WR(ctx, 0x00e10/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00e14/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00e24/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00e28/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00e2c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00e3c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00e40/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00e64/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00e6c/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00e74/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00e7c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00e80/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00ea4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00ea8/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00eac/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00ebc/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00ec0/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00ee4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00eec/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00ef4/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00efc/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00f00/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00f28/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00f34/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00f4c/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00f50/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00f54/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00f58/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00f74/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00f78/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00f7c/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00f80/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00f84/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00f88/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00f94/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00f98/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00f9c/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00fa0/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00fa4/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00fac/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00fb0/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00fc0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00fc4/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00fc8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00fd8/4, 0x0003ffff); ++ 
INSTANCE_WR(ctx, 0x00fdc/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x01000/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x01008/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x01010/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x01018/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x0101c/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x01040/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01044/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x01048/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01058/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x0105c/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x01080/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x01088/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x01090/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x01098/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x0109c/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x010c4/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x010d0/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x010e8/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x010ec/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x010f0/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x010f4/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x01110/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x01114/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x01118/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x0111c/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x01120/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01124/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01130/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x01134/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x01138/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x0113c/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x01140/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x01148/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x0114c/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x01230/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01284/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0130c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01324/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x0134c/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x014ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x014f0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01504/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x0150c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01510/4, 0x00000004); ++ INSTANCE_WR(ctx, 
0x01530/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x0156c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x015d0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01630/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0164c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01650/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01670/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01690/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x016c4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x016e4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01724/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01744/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0176c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01784/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x0178c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x017cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01924/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x01a4c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01b30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b50/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01b70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b90/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x01bb0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01bd0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01c6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01c70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01c8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01c90/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01cac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01ccc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01cec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d10/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01d2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d4c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01dac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01dcc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01dec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01e0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01e2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01e4c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0218c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x021cc/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x022ac/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x022ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0232c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x024cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x025cc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x026cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x027ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x027cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x027ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0280c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0282c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0284c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0286c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x028ac/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x028ec/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x02bac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02bcc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02bec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c0c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c2c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02c4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02cec/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x02d0c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0398c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x039cc/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x03b6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03b8c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x03bec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03ccc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x03dec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03e04/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x03e0c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x03e44/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03e4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x040cc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x042ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0430c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0432c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0434c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0436c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0438c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x043ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x043cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x043ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0440c/4, 0x3f800000); ++ 
INSTANCE_WR(ctx, 0x0442c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0444c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0446c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0448c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x044ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x044cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0480c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0484c/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x0492c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0496c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x049a4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x049ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04b4c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04c4c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x04d4c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04e2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e8c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x04eac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04ecc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x04eec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04f2c/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x04f6c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x0522c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0524c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0526c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0528c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x052ac/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x052cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x052ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0536c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0538c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x083a0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x083c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x083e0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x08400/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08420/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08440/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x084a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x084c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x084e0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08500/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08520/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x11e40/4, 0x00000004); ++ INSTANCE_WR(ctx, 
0x11e60/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x15044/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x152e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15304/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x15324/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x15344/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x15384/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x15444/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15484/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x154a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x154c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x154e4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x15504/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x155e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15624/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15644/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15664/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15704/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x15744/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15764/4, 0x00000015); ++ INSTANCE_WR(ctx, 0x157e4/4, 0x04444480); ++ INSTANCE_WR(ctx, 0x15f64/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x16004/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x16064/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x160a4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x160c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x160e4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x16104/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16124/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16144/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x161b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x161c8/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x161d0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x16228/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x16408/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x16410/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x164e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16508/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x16568/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16590/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x165b0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x165d0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x165f0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16610/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16730/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x167b0/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x167c8/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x16870/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x168a8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x169a8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x169c8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x16a10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16a30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16a50/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16a70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16a90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ab0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16ad0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16b10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x16bc8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16c10/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x16c68/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16c70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16c88/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x16ca8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x16cf0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x16d10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16d28/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x16d48/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x16d50/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x16d70/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x16d90/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x16de8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ef0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f30/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16f50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f90/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16fb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ff0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17008/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17010/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17028/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17048/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17050/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17068/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17070/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17088/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17090/4, 0x00000001); ++ 
INSTANCE_WR(ctx, 0x170a8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x170b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x170c8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x170d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x170e8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x170f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17108/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17128/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17148/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17168/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17188/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171a8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171c8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171e8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171f0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17208/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x17210/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x17310/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x17370/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17390/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17410/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x174d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17570/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17670/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x176e8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x176f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17708/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x17710/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17750/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17768/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x177a8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x177c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x177d0/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x177e8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x17808/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x17810/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17828/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x17850/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17bc4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17be4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17c28/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x17c48/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x17c84/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17c88/4, 0x00000003); ++ INSTANCE_WR(ctx, 
0x17db0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17dd0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17df0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e04/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17e10/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e24/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17e30/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e50/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e70/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e90/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17eb0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17fb0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17fd0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x17ff0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18010/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18030/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18050/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18070/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18090/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x180b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x180d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x180f0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18110/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18130/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18150/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18168/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x18170/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18190/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x181a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x181b0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x181c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x181d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x181e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x181f0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x18208/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18228/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18248/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18288/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x182c8/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x182f0/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x18310/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18330/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x183d0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x183f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18408/4, 
0x00000004); ++ INSTANCE_WR(ctx, 0x18428/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18430/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x18448/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18468/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x184d0/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x18550/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x18570/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x186b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18750/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x187b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x187d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x187f0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x18870/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18970/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18990/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x18aa8/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x18b08/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x18b48/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18b68/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18b88/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x18bc8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18be8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18c28/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x18c90/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x18cc8/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x18ce8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18d08/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x18d10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18d28/4, 0x0000007f); ++ INSTANCE_WR(ctx, 0x18d68/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18d70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18d88/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x18db0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18dc8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x18dd0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18de8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18e08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18e48/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x18e50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18ec8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18ee8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x18ef0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18f30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18fb0/4, 0x2a712488); ++ 
INSTANCE_WR(ctx, 0x18fc8/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x18fe8/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x18ff0/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x19010/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x19030/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x19050/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x19070/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x192d0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x192f0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x19310/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19350/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x19370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x193d0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x194f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19530/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19550/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x19570/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x19590/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x195b0/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x195f0/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x19630/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19708/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19768/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x198f0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19910/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19930/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x199d0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19a30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19a50/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19a70/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19a90/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19e88/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x19ea8/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x19f08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19f30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19f50/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19f70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19f90/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x19fb0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x19fd0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a070/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a090/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a110/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a1e8/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x1a248/4, 0x3f800000); ++ INSTANCE_WR(ctx, 
0x1a2c8/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1a2e8/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x1a808/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x1a848/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x1a888/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a8a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a8e8/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1a948/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1a988/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a9a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a9e8/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x1aa08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1aa28/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x1aa68/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x2d2c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2d2e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2d328/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x2d348/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2d368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2d3a8/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x2d3e8/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x2d468/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d488/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d4a8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d4c8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d4e8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d508/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d528/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d548/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d568/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d588/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d5a8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d5c8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d5e8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d608/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d628/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d648/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2dae8/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x2db08/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x2db68/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x2e5b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2e5d0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x2e810/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2e990/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2e9b0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2e9d0/4, 
0x0000ffff); ++ INSTANCE_WR(ctx, 0x2e9f0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ea10/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2eb30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ebb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ec70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee50/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2ee70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2eeb0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2eed0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ef10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f010/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f070/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f0f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f110/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f150/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f170/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f190/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f2f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f310/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f330/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f350/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f390/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f3b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f3f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f410/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f430/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f450/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f470/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f490/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f4b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f4d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f4f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f5f0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f610/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2f710/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x2f770/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f810/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f8d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f970/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fa70/4, 0x0fac6881); ++ 
INSTANCE_WR(ctx, 0x2faf0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fb10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fb50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fb90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fbd0/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x2fc10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fc50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x301b0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x301d0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x301f0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30210/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30230/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30250/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30270/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30290/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x302b0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x303b0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x303d0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x303f0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30410/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30430/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30450/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30470/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30490/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x304b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x304d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x304f0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30510/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30530/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30550/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30570/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30590/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x305b0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x305d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x305f0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x306f0/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x30710/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30730/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30770/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x307d0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x307f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30830/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x308d0/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x30950/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x30970/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 
0x30ab0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30b50/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x30b90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30bb0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30bd0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30bf0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x30c70/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30d70/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30d90/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x31090/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x31110/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x31170/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x311b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x311d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31250/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x312f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31330/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x313b0/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x313f0/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x31410/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x31430/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x31450/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x31470/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x316d0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x316f0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31710/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31750/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31770/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x317d0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x318f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31930/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31950/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31970/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31990/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x319b0/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x319f0/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x4a7e0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4a800/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4a820/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4a840/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x4a880/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4a8c0/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x4a8e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4a900/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x4a960/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4a980/4, 
0x00000027); ++ INSTANCE_WR(ctx, 0x4a9e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x52220/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x52500/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x526a0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x526c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x52700/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x52780/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x527c0/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x52920/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x52940/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x52960/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x52a80/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x52b00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x52d40/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x52d60/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x52d80/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x52da0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x52dc0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x52de0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53200/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53220/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53240/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53260/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53280/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x532a0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x532c0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x532e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53300/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53320/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53340/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53360/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53380/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x533a0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x533c0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x533e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53400/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x53460/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x53500/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53524/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53540/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53544/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53560/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53564/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53580/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53584/4, 0x00001000); ++ 
INSTANCE_WR(ctx, 0x535a0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x535e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53600/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53644/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53660/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53684/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x536a0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x536a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x536c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53824/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53840/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53844/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53860/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53864/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53880/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53884/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x538a0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x538e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53900/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53944/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53960/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53984/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x539a0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x539a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x539c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53b04/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53b20/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53be4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c00/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c04/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c20/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c24/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c40/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c44/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c60/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c64/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53c80/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53c84/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53ca0/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53ca4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53cc0/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53cc4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53ce0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53d04/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x53d20/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 
0x53dc4/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53de0/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53de4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x53e00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x53e24/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53e40/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53e44/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x53e60/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x53f64/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x53f80/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x54004/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x54020/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x54144/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x54160/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x54164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54180/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54184/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x541c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x541c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54200/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54204/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54220/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54244/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x54260/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x5b6a4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x5b6c0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x5b6e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x5b700/4, 0x00000001); ++} ++ ++int ++nv50_graph_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int grctx_size = 0x60000, hdr; ++ int ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); ++ if (ret) ++ return ret; ++ ++ hdr = IS_G80 ? 
0x200 : 0x20; ++ INSTANCE_WR(ramin, (hdr + 0x00)/4, 0x00190002); ++ INSTANCE_WR(ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + ++ grctx_size - 1); ++ INSTANCE_WR(ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); ++ INSTANCE_WR(ramin, (hdr + 0x0c)/4, 0); ++ INSTANCE_WR(ramin, (hdr + 0x10)/4, 0); ++ INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000); ++ ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00000/4, ++ chan->ramin->instance >> 12); ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x0011c/4, 0x00000002); ++ ++ switch (dev_priv->chipset) { ++ case 0x84: ++ nv84_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ case 0x86: ++ nv86_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ default: ++ /* This is complete crack, it accidently used to make at ++ * least some G8x cards work partially somehow, though there's ++ * no good reason why - and it stopped working as the rest ++ * of the code got off the drugs.. ++ */ ++ ret = engine->graph.load_context(chan); ++ if (ret) { ++ DRM_ERROR("Error hacking up context: %d\n", ret); ++ return ret; ++ } ++ break; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i, hdr; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ hdr = IS_G80 ? 0x200 : 0x20; ++ for (i=hdr; iramin->gpuobj, i/4, 0); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); ++} ++ ++static int ++nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t old_cp, tv = 20000; ++ int i; ++ ++ DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save); ++ ++ old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(0x400824, NV_READ(0x400824) | ++ (save ? 
NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : ++ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD)); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX); ++ ++ for (i = 0; i < tv; i++) { ++ if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) ++ break; ++ } ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); ++ ++ if (i == tv) { ++ DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); ++ DRM_ERROR("0x40030C = 0x%08x\n", ++ NV_READ(NV40_PGRAPH_CTXCTL_030C)); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++int ++nv50_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst = chan->ramin->instance >> 12; ++ int ret; (void)ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(0x400320, 4); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst | (1<<31)); ++ ++ return 0; ++} ++ ++int ++nv50_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ uint32_t inst = chan->ramin->instance >> 12; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ return nv50_graph_transfer_context(dev, inst, 1); ++} +diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c +new file mode 100644 +index 0000000..b7a51f0 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv50_instmem.c +@@ -0,0 +1,324 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++typedef struct { ++ uint32_t save1700[5]; /* 0x1700->0x1710 */ ++ ++ struct nouveau_gpuobj_ref *pramin_pt; ++ struct nouveau_gpuobj_ref *pramin_bar; ++} nv50_instmem_priv; ++ ++#define NV50_INSTMEM_PAGE_SHIFT 12 ++#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) ++#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) ++ ++/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN ++ */ ++#define BAR0_WI32(g,o,v) do { \ ++ uint32_t offset; \ ++ if ((g)->im_backing) { \ ++ offset = (g)->im_backing->start; \ ++ } else { \ ++ offset = chan->ramin->gpuobj->im_backing->start; \ ++ offset += (g)->im_pramin->start; \ ++ } \ ++ offset += (o); \ ++ NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v)); \ ++} while(0) ++ ++int ++nv50_instmem_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan; ++ uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; ++ nv50_instmem_priv *priv; ++ int ret, i; ++ uint32_t v; ++ ++ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); ++ if (!priv) ++ return -ENOMEM; ++ dev_priv->Engine.instmem.priv = priv; ++ ++ /* Save state, will restore at takedown. */ ++ for (i = 0x1700; i <= 0x1710; i+=4) ++ priv->save1700[(i-0x1700)/4] = NV_READ(i); ++ ++ /* Reserve the last MiB of VRAM, we should probably try to avoid ++ * setting up the below tables over the top of the VBIOS image at ++ * some point. ++ */ ++ dev_priv->ramin_rsvd_vram = 1 << 20; ++ c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; ++ c_size = 128 << 10; ++ c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; ++ c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 
0x0 : 0x20; ++ c_base = c_vmpd + 0x4000; ++ pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size); ++ ++ DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset); ++ DRM_DEBUG(" VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8); ++ DRM_DEBUG(" Aperture size: %d MiB\n", ++ (uint32_t)dev_priv->ramin->size >> 20); ++ DRM_DEBUG(" PT size: %d KiB\n", pt_size >> 10); ++ ++ NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16)); ++ ++ /* Create a fake channel, and use it as our "dummy" channels 0/127. ++ * The main reason for creating a channel is so we can use the gpuobj ++ * code. However, it's probably worth noting that NVIDIA also setup ++ * their channels 0/127 with the same values they configure here. ++ * So, there may be some other reason for doing this. ++ * ++ * Have to create the entire channel manually, as the real channel ++ * creation code assumes we have PRAMIN access, and we don't until ++ * we're done here. ++ */ ++ chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER); ++ if (!chan) ++ return -ENOMEM; ++ chan->id = 0; ++ chan->dev = dev; ++ chan->file_priv = (struct drm_file *)-2; ++ dev_priv->fifos[0] = dev_priv->fifos[127] = chan; ++ ++ /* Channel's PRAMIN object + heap */ ++ if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0, ++ NULL, &chan->ramin))) ++ return ret; ++ ++ if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base)) ++ return -ENOMEM; ++ ++ /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ ++ if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, ++ 0x4000, 0, NULL, &chan->ramfc))) ++ return ret; ++ ++ for (i = 0; i < c_vmpd; i += 4) ++ BAR0_WI32(chan->ramin->gpuobj, i, 0); ++ ++ /* VM page directory */ ++ if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, ++ 0x4000, 0, &chan->vm_pd, NULL))) ++ return ret; ++ for (i = 0; i < 0x4000; i += 8) { ++ BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000); ++ BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000); ++ } ++ ++ /* PRAMIN page table, cheat and 
map into VM at 0x0000000000. ++ * We map the entire fake channel into the start of the PRAMIN BAR ++ */ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, ++ 0, &priv->pramin_pt))) ++ return ret; ++ ++ for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) { ++ if (v < (c_offset + c_size)) ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); ++ else ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); ++ } ++ ++ BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); ++ BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); ++ ++ /* DMA object for PRAMIN BAR */ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, ++ &priv->pramin_bar))) ++ return ret; ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000); ++ ++ /* Poke the relevant regs, and pray it works :) */ ++ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); ++ NV_WRITE(NV50_PUNK_UNK1710, 0); ++ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | ++ NV50_PUNK_BAR_CFG_BASE_VALID); ++ NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0); ++ NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | ++ NV50_PUNK_BAR3_CTXDMA_VALID); ++ ++ /* Assume that praying isn't enough, check that we can re-read the ++ * entire fake channel back from the PRAMIN BAR */ ++ for (i = 0; i < c_size; i+=4) { ++ if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) { ++ DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i); ++ return -EINVAL; ++ } ++ } ++ ++ /* Global PRAMIN heap */ ++ if (nouveau_mem_init_heap(&dev_priv->ramin_heap, ++ c_size, dev_priv->ramin->size - c_size)) { ++ dev_priv->ramin_heap = NULL; ++ 
DRM_ERROR("Failed to init RAMIN heap\n"); ++ } ++ ++ /*XXX: incorrect, but needed to make hash func "work" */ ++ dev_priv->ramht_offset = 0x10000; ++ dev_priv->ramht_bits = 9; ++ dev_priv->ramht_size = (1 << dev_priv->ramht_bits); ++ return 0; ++} ++ ++void ++nv50_instmem_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; ++ struct nouveau_channel *chan = dev_priv->fifos[0]; ++ int i; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!priv) ++ return; ++ ++ /* Restore state from before init */ ++ for (i = 0x1700; i <= 0x1710; i+=4) ++ NV_WRITE(i, priv->save1700[(i-0x1700)/4]); ++ ++ nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); ++ nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); ++ ++ /* Destroy dummy channel */ ++ if (chan) { ++ nouveau_gpuobj_del(dev, &chan->vm_pd); ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++ nouveau_gpuobj_ref_del(dev, &chan->ramin); ++ nouveau_mem_takedown(&chan->ramin_heap); ++ ++ dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; ++ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); ++ } ++ ++ dev_priv->Engine.instmem.priv = NULL; ++ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); ++} ++ ++int ++nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) ++{ ++ if (gpuobj->im_backing) ++ return -EINVAL; ++ ++ *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1); ++ if (*sz == 0) ++ return -EINVAL; ++ ++ gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE, ++ *sz, NOUVEAU_MEM_FB | ++ NOUVEAU_MEM_NOVM, ++ (struct drm_file *)-2); ++ if (!gpuobj->im_backing) { ++ DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (gpuobj && gpuobj->im_backing) { ++ if (gpuobj->im_bound) ++ 
dev_priv->Engine.instmem.unbind(dev, gpuobj); ++ nouveau_mem_free(dev, gpuobj->im_backing); ++ gpuobj->im_backing = NULL; ++ } ++} ++ ++int ++nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; ++ uint32_t pte, pte_end, vram; ++ ++ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) ++ return -EINVAL; ++ ++ DRM_DEBUG("st=0x%0llx sz=0x%0llx\n", ++ gpuobj->im_pramin->start, gpuobj->im_pramin->size); ++ ++ pte = (gpuobj->im_pramin->start >> 12) << 3; ++ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; ++ vram = gpuobj->im_backing->start; ++ ++ DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n", ++ gpuobj->im_pramin->start, pte, pte_end); ++ DRM_DEBUG("first vram page: 0x%llx\n", ++ gpuobj->im_backing->start); ++ ++ while (pte < pte_end) { ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); ++ ++ pte += 8; ++ vram += NV50_INSTMEM_PAGE_SIZE; ++ } ++ ++ gpuobj->im_bound = 1; ++ return 0; ++} ++ ++int ++nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; ++ uint32_t pte, pte_end; ++ ++ if (gpuobj->im_bound == 0) ++ return -EINVAL; ++ ++ pte = (gpuobj->im_pramin->start >> 12) << 3; ++ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; ++ while (pte < pte_end) { ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); ++ pte += 8; ++ } ++ ++ gpuobj->im_bound = 0; ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c +new file mode 100644 +index 0000000..b111826 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv50_mc.c +@@ -0,0 +1,43 @@ ++/* ++ * Copyright (C) 2007 Ben 
Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++int ++nv50_mc_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ return 0; ++} ++ ++void nv50_mc_takedown(struct drm_device *dev) ++{ ++} +diff --git a/include/drm/Kbuild b/include/drm/Kbuild +index 82b6983..d65553c 100644 +--- a/include/drm/Kbuild ++++ b/include/drm/Kbuild +@@ -8,3 +8,4 @@ unifdef-y += radeon_drm.h + unifdef-y += sis_drm.h + unifdef-y += savage_drm.h + unifdef-y += via_drm.h ++unifdef-y += nouveau_drm.h +diff --git a/include/drm/drmP.h b/include/drm/drmP.h +index 31e2f17..c6ba6a9 100644 +--- a/include/drm/drmP.h ++++ b/include/drm/drmP.h +@@ -1136,6 +1136,8 @@ extern void drm_idlelock_release(struct drm_lock_data *lock_data); + extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv); + + /* Buffer management support (drm_bufs.h) */ ++extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, ++ drm_local_map_t *map); + extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); + extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); + extern int drm_addmap(struct drm_device *dev, unsigned int offset, +diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h +new file mode 100644 +index 0000000..a99c615 +--- /dev/null ++++ b/include/drm/nouveau_drm.h +@@ -0,0 +1,184 @@ ++/* ++ * Copyright 2005 Stephane Marchesin. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#ifndef __NOUVEAU_DRM_H__ ++#define __NOUVEAU_DRM_H__ ++ ++#define NOUVEAU_DRM_HEADER_PATCHLEVEL 11 ++ ++struct drm_nouveau_channel_alloc { ++ uint32_t fb_ctxdma_handle; ++ uint32_t tt_ctxdma_handle; ++ ++ int channel; ++ uint32_t put_base; ++ /* FIFO control regs */ ++ drm_handle_t ctrl; ++ int ctrl_size; ++ /* DMA command buffer */ ++ drm_handle_t cmdbuf; ++ int cmdbuf_size; ++ /* Notifier memory */ ++ drm_handle_t notifier; ++ int notifier_size; ++}; ++ ++struct drm_nouveau_channel_free { ++ int channel; ++}; ++ ++struct drm_nouveau_grobj_alloc { ++ int channel; ++ uint32_t handle; ++ int class; ++}; ++ ++#define NOUVEAU_MEM_ACCESS_RO 1 ++#define NOUVEAU_MEM_ACCESS_WO 2 ++#define NOUVEAU_MEM_ACCESS_RW 3 ++struct drm_nouveau_notifierobj_alloc { ++ int channel; ++ uint32_t handle; ++ int count; ++ ++ uint32_t offset; ++}; ++ ++struct drm_nouveau_gpuobj_free { ++ int channel; ++ uint32_t handle; ++}; ++ ++/* This is needed to avoid a race condition. ++ * Otherwise you may be writing in the fetch area. ++ * Is this large enough, as it's only 32 bytes, and the maximum fetch size is 256 bytes? 
++ */ ++#define NOUVEAU_DMA_SKIPS 8 ++ ++#define NOUVEAU_MEM_FB 0x00000001 ++#define NOUVEAU_MEM_AGP 0x00000002 ++#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004 ++#define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008 ++#define NOUVEAU_MEM_PCI 0x00000010 ++#define NOUVEAU_MEM_PCI_ACCEPTABLE 0x00000020 ++#define NOUVEAU_MEM_PINNED 0x00000040 ++#define NOUVEAU_MEM_USER_BACKED 0x00000080 ++#define NOUVEAU_MEM_MAPPED 0x00000100 ++#define NOUVEAU_MEM_TILE 0x00000200 ++#define NOUVEAU_MEM_TILE_ZETA 0x00000400 ++#define NOUVEAU_MEM_INSTANCE 0x01000000 /* internal */ ++#define NOUVEAU_MEM_NOTIFIER 0x02000000 /* internal */ ++#define NOUVEAU_MEM_NOVM 0x04000000 /* internal */ ++#define NOUVEAU_MEM_USER 0x08000000 /* internal */ ++#define NOUVEAU_MEM_INTERNAL (NOUVEAU_MEM_INSTANCE | \ ++ NOUVEAU_MEM_NOTIFIER | \ ++ NOUVEAU_MEM_NOVM | \ ++ NOUVEAU_MEM_USER) ++ ++struct drm_nouveau_mem_alloc { ++ int flags; ++ int alignment; ++ uint64_t size; // in bytes ++ uint64_t offset; ++ drm_handle_t map_handle; ++}; ++ ++struct drm_nouveau_mem_free { ++ uint64_t offset; ++ int flags; ++}; ++ ++struct drm_nouveau_mem_tile { ++ uint64_t offset; ++ uint64_t delta; ++ uint64_t size; ++ int flags; ++}; ++ ++/* FIXME : maybe unify {GET,SET}PARAMs */ ++#define NOUVEAU_GETPARAM_PCI_VENDOR 3 ++#define NOUVEAU_GETPARAM_PCI_DEVICE 4 ++#define NOUVEAU_GETPARAM_BUS_TYPE 5 ++#define NOUVEAU_GETPARAM_FB_PHYSICAL 6 ++#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 ++#define NOUVEAU_GETPARAM_FB_SIZE 8 ++#define NOUVEAU_GETPARAM_AGP_SIZE 9 ++#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 ++#define NOUVEAU_GETPARAM_CHIPSET_ID 11 ++struct drm_nouveau_getparam { ++ uint64_t param; ++ uint64_t value; ++}; ++ ++#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1 ++#define NOUVEAU_SETPARAM_CMDBUF_SIZE 2 ++struct drm_nouveau_setparam { ++ uint64_t param; ++ uint64_t value; ++}; ++ ++enum nouveau_card_type { ++ NV_UNKNOWN =0, ++ NV_04 =4, ++ NV_05 =5, ++ NV_10 =10, ++ NV_11 =11, ++ NV_17 =17, ++ NV_20 =20, ++ NV_30 =30, ++ NV_40 =40, ++ NV_44 
=44, ++ NV_50 =50, ++ NV_LAST =0xffff, ++}; ++ ++enum nouveau_bus_type { ++ NV_AGP =0, ++ NV_PCI =1, ++ NV_PCIE =2, ++}; ++ ++#define NOUVEAU_MAX_SAREA_CLIPRECTS 16 ++ ++struct drm_nouveau_sarea { ++ /* the cliprects */ ++ struct drm_clip_rect boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++}; ++ ++#define DRM_NOUVEAU_CARD_INIT 0x00 ++#define DRM_NOUVEAU_GETPARAM 0x01 ++#define DRM_NOUVEAU_SETPARAM 0x02 ++#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03 ++#define DRM_NOUVEAU_CHANNEL_FREE 0x04 ++#define DRM_NOUVEAU_GROBJ_ALLOC 0x05 ++#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06 ++#define DRM_NOUVEAU_GPUOBJ_FREE 0x07 ++#define DRM_NOUVEAU_MEM_ALLOC 0x08 ++#define DRM_NOUVEAU_MEM_FREE 0x09 ++#define DRM_NOUVEAU_MEM_TILE 0x0a ++#define DRM_NOUVEAU_SUSPEND 0x0b ++#define DRM_NOUVEAU_RESUME 0x0c ++ ++#endif /* __NOUVEAU_DRM_H__ */ diff --git a/sys-kernel/geos_one-sources/files/enable-4k-stacks-default-2.6.24.patch b/sys-kernel/geos_one-sources/files/enable-4k-stacks-default-2.6.24.patch new file mode 100644 index 00000000..2bdff85c --- /dev/null +++ b/sys-kernel/geos_one-sources/files/enable-4k-stacks-default-2.6.24.patch @@ -0,0 +1,10 @@ +--- linux-2.6.24.orig/arch/x86/Kconfig.debug ++++ linux-2.6.24/arch/x86/Kconfig.debug +@@ -59,6 +59,7 @@ + config 4KSTACKS + bool "Use 4Kb for kernel stacks instead of 8Kb" + depends on DEBUG_KERNEL ++ default y + help + If you say Y here the kernel will use a 4Kb stacksize for the + kernel stack attached to each process/thread. 
This facilitates diff --git a/sys-kernel/geos_one-sources/files/hz-432-kconfig-option.patch b/sys-kernel/geos_one-sources/files/hz-432-kconfig-option.patch new file mode 100644 index 00000000..2fe9a4f8 --- /dev/null +++ b/sys-kernel/geos_one-sources/files/hz-432-kconfig-option.patch @@ -0,0 +1,25 @@ +diff -urN oldtree/kernel/Kconfig.hz newtree/kernel/Kconfig.hz +--- oldtree/kernel/Kconfig.hz 2007-03-06 15:00:55.000000000 -0500 ++++ newtree/kernel/Kconfig.hz 2007-03-06 17:52:36.000000000 -0500 +@@ -39,6 +39,14 @@ + on SMP and NUMA systems and exactly dividing by both PAL and + NTSC frame rates for video and multimedia work. + ++ config HZ_432 ++ bool "432 HZ" ++ help ++ 432 HZ is the best value for desktop systems. Most responsive ++ out of all the options. This is for Dual Core/Processor systems only. ++ as timer frequencies * number of processors = actual frequency. ++ Try this if you have a dual-core/dual processor system. ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -52,5 +60,6 @@ + default 100 if HZ_100 + default 250 if HZ_250_NODEFAULT + default 300 if HZ_300 ++ default 432 if HZ_432 + default 1000 if HZ_1000 + diff --git a/sys-kernel/geos_one-sources/files/hz-864-kconfig-option.patch b/sys-kernel/geos_one-sources/files/hz-864-kconfig-option.patch new file mode 100644 index 00000000..6bdca045 --- /dev/null +++ b/sys-kernel/geos_one-sources/files/hz-864-kconfig-option.patch @@ -0,0 +1,25 @@ +diff -urN oldtree/kernel/Kconfig.hz newtree/kernel/Kconfig.hz +--- oldtree/kernel/Kconfig.hz 2007-03-06 15:00:55.000000000 -0500 ++++ newtree/kernel/Kconfig.hz 2007-03-06 17:52:36.000000000 -0500 +@@ -39,6 +39,14 @@ + as timer frequencies * number of processors = actual frequency. + Try this if you have a dual-core/dual processor system. + ++ config HZ_864 ++ bool "864 HZ" ++ help ++ 864 HZ is the best value for desktop systems. Most responsive ++ out of all the options. The only reason it is not default is ++ because it may break few drivers. 
Give it a try if you have ++ a desktop :). ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -52,5 +60,6 @@ + default 250 if HZ_250_NODEFAULT + default 300 if HZ_300 + default 432 if HZ_432 ++ default 864 if HZ_864 + default 1000 if HZ_1000 + diff --git a/sys-kernel/geos_one-sources/files/linux-2.6-defaults-fat-utf8.patch b/sys-kernel/geos_one-sources/files/linux-2.6-defaults-fat-utf8.patch new file mode 100644 index 00000000..0d40fd3b --- /dev/null +++ b/sys-kernel/geos_one-sources/files/linux-2.6-defaults-fat-utf8.patch @@ -0,0 +1,15 @@ + +https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=181963 + +--- linux-2.6.15.noarch/fs/fat/inode.c~ 2006-02-20 23:20:12.000000000 -0500 ++++ linux-2.6.15.noarch/fs/fat/inode.c 2006-02-20 23:21:42.000000000 -0500 +@@ -952,7 +952,8 @@ static int parse_options(char *options, + opts->shortname = 0; + opts->name_check = 'n'; + opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0; +- opts->utf8 = opts->unicode_xlate = 0; ++ opts->utf8 = 1; ++ opts->unicode_xlate = 0; + opts->numtail = 1; + opts->nocase = 0; + *debug = 0; diff --git a/sys-kernel/geos_one-sources/files/linux-2.6-x86-tune-generic.patch b/sys-kernel/geos_one-sources/files/linux-2.6-x86-tune-generic.patch new file mode 100644 index 00000000..7a7c76eb --- /dev/null +++ b/sys-kernel/geos_one-sources/files/linux-2.6-x86-tune-generic.patch @@ -0,0 +1,13 @@ +* Optimise for today's CPUs. 
+ +--- linux-2.6/arch/x86/Makefile_32.cpu 2006-01-09 11:39:04.000000000 -0500 ++++ linux-2.6/arch/x86/Makefile_32.cpu 2006-01-09 11:39:36.000000000 -0500 +@@ -15,7 +15,7 @@ cflags-$(CONFIG_M486) += -march=i486 + cflags-$(CONFIG_M586) += -march=i586 + cflags-$(CONFIG_M586TSC) += -march=i586 + cflags-$(CONFIG_M586MMX) += -march=pentium-mmx +-cflags-$(CONFIG_M686) += -march=i686 ++cflags-$(CONFIG_M686) += -march=i686 $(call tune,generic) + cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call tune,pentium2) + cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call tune,pentium3) + cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call tune,pentium3) diff --git a/sys-kernel/geos_one-sources/files/linux-2.6.27-lirc.patch b/sys-kernel/geos_one-sources/files/linux-2.6.27-lirc.patch new file mode 100644 index 00000000..e8e605dd --- /dev/null +++ b/sys-kernel/geos_one-sources/files/linux-2.6.27-lirc.patch @@ -0,0 +1,15065 @@ +diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig +index 5f9d860..2ba0904 100644 +--- a/drivers/input/Kconfig ++++ b/drivers/input/Kconfig +@@ -170,6 +170,8 @@ source "drivers/input/tablet/Kconfig" + + source "drivers/input/touchscreen/Kconfig" + ++source "drivers/input/lirc/Kconfig" ++ + source "drivers/input/misc/Kconfig" + + endif +diff --git a/drivers/input/Makefile b/drivers/input/Makefile +index 98c4f9a..6a1049b 100644 +--- a/drivers/input/Makefile ++++ b/drivers/input/Makefile +@@ -25,3 +25,5 @@ obj-$(CONFIG_INPUT_MISC) += misc/ + obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o + + obj-$(CONFIG_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o ++ ++obj-$(CONFIG_INPUT_LIRC) += lirc/ +diff --git a/drivers/input/lirc/Kconfig b/drivers/input/lirc/Kconfig +new file mode 100644 +index 0000000..fad3bbb +--- /dev/null ++++ b/drivers/input/lirc/Kconfig +@@ -0,0 +1,128 @@ ++# ++# LIRC driver(s) configuration ++# ++menuconfig INPUT_LIRC ++ bool "Linux Infrared Remote Control IR receiver/transmitter drivers" ++ default n ++ help ++ Say Y here, and all supported Linux Infrared 
Remote Control IR and ++ RF receiver and transmitter drivers will be displayed. When paired ++ with a remote control and the lirc daemon, the receiver drivers ++ allow control of your Linux system via remote control. ++ ++if INPUT_LIRC ++ ++config LIRC_DEV ++ tristate "LIRC device loadable module support" ++ default n ++ help ++ LIRC device loadable module support, required for most LIRC drivers ++ ++config LIRC_BT829 ++ tristate "BT829 based hardware" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the IR interface on BT829-based hardware ++ ++config LIRC_I2C ++ tristate "I2C Based IR Receivers" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for I2C-based IR receivers, such as those commonly ++ found onboard Hauppauge PVR-150/250/350 video capture cards ++ ++config LIRC_IGORPLUGUSB ++ tristate "Igor Cesko's USB IR Receiver" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for Igor Cesko's USB IR Receiver ++ ++config LIRC_IMON ++ tristate "Soundgraph IMON Receiver" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the Soundgraph IMON IR Receiver ++ ++config LIRC_IT87 ++ tristate "ITE IT87XX CIR Port Receiver" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the ITE IT87xx IR Receiver ++ ++config LIRC_ITE8709 ++ tristate "ITE8709 CIR Port Receiver" ++ default n ++ depends on LIRC_DEV && PNP ++ help ++ Driver for the ITE8709 IR Receiver ++ ++config LIRC_MCEUSB ++ tristate "Microsoft Media Center Ed. Receiver, v1" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the Microsoft Media Center Ed. Receiver, v1 ++ ++config LIRC_MCEUSB2 ++ tristate "Microsoft Media Center Ed. Receiver, v2" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the Microsoft Media Center Ed. 
Receiver, v2 ++ ++config LIRC_PARALLEL ++ tristate "Homebrew Parallel Port Receiver" ++ default n ++ depends on LIRC_DEV && !SMP ++ help ++ Driver for Homebrew Parallel Port Receivers ++ ++config LIRC_SASEM ++ tristate "Sasem USB IR Remote" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module ++ ++config LIRC_SERIAL ++ tristate "Homebrew Serial Port Receiver" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for Homebrew Serial Port Receivers ++ ++config LIRC_SIR ++ tristate "Built-in SIR IrDA port" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the SIR IrDA port ++ ++config LIRC_STREAMZAP ++ tristate "Streamzap PC Receiver" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the Streamzap PC Receiver ++ ++config LIRC_TTUSBIR ++ tristate "Technotrend USB IR Receiver" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the Technotrend USB IR Receiver ++ ++config LIRC_ZILOG ++ tristate "Zilog/Hauppauge IR Transmitter" ++ default n ++ depends on LIRC_DEV ++ help ++ Driver for the Zilog/Hauppauge IR Transmitter, found on ++ PVR-150/500, HVR-1200/1250/1700/1800, HD-PVR and other cards ++ ++endif +diff --git a/drivers/input/lirc/Makefile b/drivers/input/lirc/Makefile +new file mode 100644 +index 0000000..ceb5c86 +--- /dev/null ++++ b/drivers/input/lirc/Makefile +@@ -0,0 +1,23 @@ ++# Makefile for the lirc drivers. ++# ++ ++# Each configuration option enables a list of files. 
++ ++EXTRA_CFLAGS =-DIRCTL_DEV_MAJOR=61 -DLIRC_SERIAL_TRANSMITTER -I$(src) ++ ++obj-$(CONFIG_LIRC_DEV) += lirc_dev.o ++obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o ++obj-$(CONFIG_LIRC_I2C) += lirc_i2c.o ++obj-$(CONFIG_LIRC_IGORPLUGUSB) += lirc_igorplugusb.o ++obj-$(CONFIG_LIRC_IMON) += lirc_imon.o ++obj-$(CONFIG_LIRC_IT87) += lirc_it87.o ++obj-$(CONFIG_LIRC_ITE8709) += lirc_ite8709.o ++obj-$(CONFIG_LIRC_MCEUSB) += lirc_mceusb.o ++obj-$(CONFIG_LIRC_MCEUSB2) += lirc_mceusb2.o ++obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o ++obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o ++obj-$(CONFIG_LIRC_SERIAL) += lirc_serial.o ++obj-$(CONFIG_LIRC_SIR) += lirc_sir.o ++obj-$(CONFIG_LIRC_STREAMZAP) += lirc_streamzap.o ++obj-$(CONFIG_LIRC_TTUSBIR) += lirc_ttusbir.o ++obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o +diff --git a/drivers/input/lirc/lirc.h b/drivers/input/lirc/lirc.h +new file mode 100644 +index 0000000..dcdb6e8 +--- /dev/null ++++ b/drivers/input/lirc/lirc.h +@@ -0,0 +1,103 @@ ++/* ++ * lirc.h - linux infrared remote control header file ++ * last modified 2007/09/27 ++ */ ++ ++#ifndef _LINUX_LIRC_H ++#define _LINUX_LIRC_H ++ ++#include ++#include ++ ++#define PULSE_BIT 0x01000000 ++#define PULSE_MASK 0x00FFFFFF ++ ++/* ++ * lirc compatible hardware features ++ */ ++ ++ ++#define LIRC_MODE2SEND(x) (x) ++#define LIRC_SEND2MODE(x) (x) ++#define LIRC_MODE2REC(x) ((x) << 16) ++#define LIRC_REC2MODE(x) ((x) >> 16) ++ ++#define LIRC_MODE_RAW 0x00000001 ++#define LIRC_MODE_PULSE 0x00000002 ++#define LIRC_MODE_MODE2 0x00000004 ++#define LIRC_MODE_CODE 0x00000008 ++#define LIRC_MODE_LIRCCODE 0x00000010 ++#define LIRC_MODE_STRING 0x00000020 ++ ++ ++#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) ++#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) ++#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) ++#define LIRC_CAN_SEND_CODE LIRC_MODE2SEND(LIRC_MODE_CODE) ++#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) ++#define LIRC_CAN_SEND_STRING 
LIRC_MODE2SEND(LIRC_MODE_STRING) ++ ++#define LIRC_CAN_SEND_MASK 0x0000003f ++ ++#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 ++#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 ++#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 ++ ++#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) ++#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) ++#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) ++#define LIRC_CAN_REC_CODE LIRC_MODE2REC(LIRC_MODE_CODE) ++#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) ++#define LIRC_CAN_REC_STRING LIRC_MODE2REC(LIRC_MODE_STRING) ++ ++#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) ++ ++#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) ++#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) ++ ++#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 ++#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 ++#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 ++ ++#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) ++#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) ++ ++#define LIRC_CAN_NOTIFY_DECODE 0x01000000 ++ ++/* ++ * IOCTL commands for lirc driver ++ */ ++ ++#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) ++ ++#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32) ++#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32) ++#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, __u32) ++#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, __u32) ++#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 0x00000005, __u32) ++#define LIRC_GET_REC_DUTY_CYCLE _IOR('i', 0x00000006, __u32) ++#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32) ++ ++/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ ++#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32) ++ ++#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32) ++#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32) ++/* Note: these can reset the according pulse_width */ ++#define LIRC_SET_SEND_CARRIER _IOW('i', 
0x00000013, __u32) ++#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32) ++#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32) ++#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 0x00000016, __u32) ++#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32) ++ ++/* to set a range use ++ LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the ++ lower bound first and later ++ LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound */ ++ ++#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, __u32) ++#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32) ++ ++#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) ++ ++#endif +diff --git a/drivers/input/lirc/lirc_bt829.c b/drivers/input/lirc/lirc_bt829.c +new file mode 100644 +index 0000000..01cbdfe +--- /dev/null ++++ b/drivers/input/lirc/lirc_bt829.c +@@ -0,0 +1,388 @@ ++/* ++ * Remote control driver for the TV-card based on bt829 ++ * ++ * by Leonid Froenchenko ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++*/ ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lirc_dev.h" ++ ++static int poll_main(void); ++static int atir_init_start(void); ++ ++static void write_index(unsigned char index, unsigned int value); ++static unsigned int read_index(unsigned char index); ++ ++static void do_i2c_start(void); ++static void do_i2c_stop(void); ++ ++static void seems_wr_byte(unsigned char al); ++static unsigned char seems_rd_byte(void); ++ ++static unsigned int read_index(unsigned char al); ++static void write_index(unsigned char ah, unsigned int edx); ++ ++static void cycle_delay(int cycle); ++ ++static void do_set_bits(unsigned char bl); ++static unsigned char do_get_bits(void); ++ ++#define DATA_PCI_OFF 0x7FFC00 ++#define WAIT_CYCLE 20 ++ ++static int debug; ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG fmt, ## args); \ ++ } while (0) ++ ++static int atir_minor; ++static unsigned long pci_addr_phys; ++static unsigned char *pci_addr_lin; ++ ++static struct lirc_plugin atir_plugin; ++ ++static struct pci_dev *do_pci_probe(void) ++{ ++ struct pci_dev *my_dev; ++ my_dev = pci_get_device(PCI_VENDOR_ID_ATI, ++ PCI_DEVICE_ID_ATI_264VT, NULL); ++ if (my_dev) { ++ printk(KERN_ERR "ATIR: Using device: %s\n", ++ pci_name(my_dev)); ++ pci_addr_phys = 0; ++ if (my_dev->resource[0].flags & IORESOURCE_MEM) { ++ pci_addr_phys = my_dev->resource[0].start; ++ printk(KERN_INFO "ATIR memory at 0x%08X \n", ++ (unsigned int)pci_addr_phys); ++ } ++ if (pci_addr_phys == 0) { ++ printk(KERN_ERR "ATIR no memory resource ?\n"); ++ return NULL; ++ } ++ } else { ++ printk(KERN_ERR "ATIR: pci_prob failed\n"); ++ return NULL; ++ } ++ return my_dev; ++} ++ ++static int atir_add_to_buf(void *data, struct lirc_buffer *buf) ++{ ++ unsigned char key; ++ int status; ++ status = poll_main(); ++ key = (status >> 8) & 0xFF; ++ if (status & 0xFF) { ++ dprintk("ATIR reading key %02X\n", key); ++ lirc_buffer_write_1(buf, &key); ++ return 0; ++ } ++ return -ENODATA; ++} ++ ++static int atir_set_use_inc(void *data) ++{ ++ dprintk("ATIR driver is opened\n"); ++ return 0; ++} ++ ++static void atir_set_use_dec(void *data) ++{ ++ dprintk("ATIR driver is closed\n"); ++} ++ ++int init_module(void) ++{ ++ struct pci_dev *pdev; ++ ++ pdev = do_pci_probe(); ++ if (pdev == NULL) ++ return 1; ++ ++ if (!atir_init_start()) ++ return 1; ++ ++ strcpy(atir_plugin.name, "ATIR"); ++ atir_plugin.minor = -1; ++ atir_plugin.code_length = 8; ++ atir_plugin.sample_rate = 10; ++ atir_plugin.data = 0; ++ atir_plugin.add_to_buf = atir_add_to_buf; ++ atir_plugin.set_use_inc = atir_set_use_inc; ++ atir_plugin.set_use_dec = atir_set_use_dec; ++ atir_plugin.dev = &pdev->dev; ++ atir_plugin.owner = THIS_MODULE; ++ ++ atir_minor = lirc_register_plugin(&atir_plugin); ++ dprintk("ATIR driver 
is registered on minor %d\n", atir_minor); ++ ++ return 0; ++} ++ ++ ++void cleanup_module(void) ++{ ++ lirc_unregister_plugin(atir_minor); ++} ++ ++ ++static int atir_init_start(void) ++{ ++ pci_addr_lin = ioremap(pci_addr_phys + DATA_PCI_OFF, 0x400); ++ if (pci_addr_lin == 0) { ++ printk(KERN_INFO "atir: pci mem must be mapped\n"); ++ return 0; ++ } ++ return 1; ++} ++ ++static void cycle_delay(int cycle) ++{ ++ udelay(WAIT_CYCLE*cycle); ++} ++ ++ ++static int poll_main() ++{ ++ unsigned char status_high, status_low; ++ ++ do_i2c_start(); ++ ++ seems_wr_byte(0xAA); ++ seems_wr_byte(0x01); ++ ++ do_i2c_start(); ++ ++ seems_wr_byte(0xAB); ++ ++ status_low = seems_rd_byte(); ++ status_high = seems_rd_byte(); ++ ++ do_i2c_stop(); ++ ++ return (status_high << 8) | status_low; ++} ++ ++static void do_i2c_start(void) ++{ ++ do_set_bits(3); ++ cycle_delay(4); ++ ++ do_set_bits(1); ++ cycle_delay(7); ++ ++ do_set_bits(0); ++ cycle_delay(2); ++} ++ ++static void do_i2c_stop(void) ++{ ++ unsigned char bits; ++ bits = do_get_bits() & 0xFD; ++ do_set_bits(bits); ++ cycle_delay(1); ++ ++ bits |= 1; ++ do_set_bits(bits); ++ cycle_delay(2); ++ ++ bits |= 2; ++ do_set_bits(bits); ++ bits = 3; ++ do_set_bits(bits); ++ cycle_delay(2); ++} ++ ++static void seems_wr_byte(unsigned char value) ++{ ++ int i; ++ unsigned char reg; ++ ++ reg = do_get_bits(); ++ for (i = 0; i < 8; i++) { ++ if (value & 0x80) ++ reg |= 0x02; ++ else ++ reg &= 0xFD; ++ ++ do_set_bits(reg); ++ cycle_delay(1); ++ ++ reg |= 1; ++ do_set_bits(reg); ++ cycle_delay(1); ++ ++ reg &= 0xFE; ++ do_set_bits(reg); ++ cycle_delay(1); ++ value <<= 1; ++ } ++ cycle_delay(2); ++ ++ reg |= 2; ++ do_set_bits(reg); ++ ++ reg |= 1; ++ do_set_bits(reg); ++ ++ cycle_delay(1); ++ do_get_bits(); ++ ++ reg &= 0xFE; ++ do_set_bits(reg); ++ cycle_delay(3); ++} ++ ++static unsigned char seems_rd_byte(void) ++{ ++ int i; ++ int rd_byte; ++ unsigned char bits_2, bits_1; ++ ++ bits_1 = do_get_bits() | 2; ++ do_set_bits(bits_1); ++ ++ 
rd_byte = 0; ++ for (i = 0; i < 8; i++) { ++ bits_1 &= 0xFE; ++ do_set_bits(bits_1); ++ cycle_delay(2); ++ ++ bits_1 |= 1; ++ do_set_bits(bits_1); ++ cycle_delay(1); ++ ++ bits_2 = do_get_bits(); ++ if (bits_2 & 2) ++ rd_byte |= 1; ++ ++ rd_byte <<= 1; ++ } ++ ++ bits_1 = 0; ++ if (bits_2 == 0) ++ bits_1 |= 2; ++ ++ do_set_bits(bits_1); ++ cycle_delay(2); ++ ++ bits_1 |= 1; ++ do_set_bits(bits_1); ++ cycle_delay(3); ++ ++ bits_1 &= 0xFE; ++ do_set_bits(bits_1); ++ cycle_delay(2); ++ ++ rd_byte >>= 1; ++ rd_byte &= 0xFF; ++ return rd_byte; ++} ++ ++static void do_set_bits(unsigned char new_bits) ++{ ++ int reg_val; ++ reg_val = read_index(0x34); ++ if (new_bits & 2) { ++ reg_val &= 0xFFFFFFDF; ++ reg_val |= 1; ++ } else { ++ reg_val &= 0xFFFFFFFE; ++ reg_val |= 0x20; ++ } ++ reg_val |= 0x10; ++ write_index(0x34, reg_val); ++ ++ reg_val = read_index(0x31); ++ if (new_bits & 1) ++ reg_val |= 0x1000000; ++ else ++ reg_val &= 0xFEFFFFFF; ++ ++ reg_val |= 0x8000000; ++ write_index(0x31, reg_val); ++} ++ ++static unsigned char do_get_bits(void) ++{ ++ unsigned char bits; ++ int reg_val; ++ ++ reg_val = read_index(0x34); ++ reg_val |= 0x10; ++ reg_val &= 0xFFFFFFDF; ++ write_index(0x34, reg_val); ++ ++ reg_val = read_index(0x34); ++ bits = 0; ++ if (reg_val & 8) ++ bits |= 2; ++ else ++ bits &= 0xFD; ++ ++ reg_val = read_index(0x31); ++ if (reg_val & 0x1000000) ++ bits |= 1; ++ else ++ bits &= 0xFE; ++ ++ return bits; ++} ++ ++static unsigned int read_index(unsigned char index) ++{ ++ unsigned char *addr; ++ unsigned int value; ++ /* addr = pci_addr_lin + DATA_PCI_OFF + ((index & 0xFF) << 2); */ ++ addr = pci_addr_lin + ((index & 0xFF) << 2); ++ value = readl(addr); ++ return value; ++} ++ ++static void write_index(unsigned char index, unsigned int reg_val) ++{ ++ unsigned char *addr; ++ addr = pci_addr_lin + ((index & 0xFF) << 2); ++ writel(reg_val, addr); ++} ++ ++MODULE_AUTHOR("Froenchenko Leonid"); ++MODULE_DESCRIPTION("IR remote driver for bt829 based TV cards"); 
++MODULE_LICENSE("GPL"); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Debug enabled or not"); ++ ++/* ++ * Overrides for Emacs so that we follow Linus's tabbing style. ++ * --------------------------------------------------------------------------- ++ * Local variables: ++ * c-basic-offset: 8 ++ * End: ++ */ +diff --git a/drivers/input/lirc/lirc_dev.c b/drivers/input/lirc/lirc_dev.c +new file mode 100644 +index 0000000..c8f325c +--- /dev/null ++++ b/drivers/input/lirc/lirc_dev.c +@@ -0,0 +1,809 @@ ++/* ++ * LIRC base driver ++ * ++ * (L) by Artur Lipowski ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#define __KERNEL_SYSCALLS__ ++#include ++#include ++ ++/* SysFS header */ ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++static int debug; ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG fmt, ## args); \ ++ } while (0) ++ ++#define IRCTL_DEV_NAME "BaseRemoteCtl" ++#define SUCCESS 0 ++#define NOPLUG -1 ++#define LOGHEAD "lirc_dev (%s[%d]): " ++ ++struct irctl { ++ struct lirc_plugin p; ++ int attached; ++ int open; ++ ++ struct mutex buffer_lock; ++ struct lirc_buffer *buf; ++ ++ struct task_struct *task; ++ long jiffies_to_wait; ++ ++}; ++ ++static DEFINE_MUTEX(plugin_lock); ++ ++static struct irctl irctls[MAX_IRCTL_DEVICES]; ++static struct file_operations fops; ++ ++/* Only used for sysfs but defined to void otherwise */ ++static struct class *lirc_class; ++ ++/* helper function ++ * initializes the irctl structure ++ */ ++static inline void init_irctl(struct irctl *ir) ++{ ++ memset(&ir->p, 0, sizeof(struct lirc_plugin)); ++ mutex_init(&ir->buffer_lock); ++ ir->p.minor = NOPLUG; ++ ++ ir->task = NULL; ++ ir->jiffies_to_wait = 0; ++ ++ ir->open = 0; ++ ir->attached = 0; ++} ++ ++static void cleanup(struct irctl *ir) ++{ ++ dprintk(LOGHEAD "cleaning up\n", ir->p.name, ir->p.minor); ++ ++ device_destroy(lirc_class, MKDEV(IRCTL_DEV_MAJOR, ir->p.minor)); ++ ++ if (ir->buf != ir->p.rbuf) { ++ lirc_buffer_free(ir->buf); ++ kfree(ir->buf); ++ } ++ ir->buf = NULL; ++ ++ init_irctl(ir); ++} ++ ++/* helper function ++ * reads key codes from plugin and puts them into buffer ++ * buffer free space is checked and locking performed ++ * returns 0 on success ++ */ ++static inline int add_to_buf(struct irctl *ir) ++{ ++ if (lirc_buffer_full(ir->buf)) { ++ dprintk(LOGHEAD "buffer overflow\n", ++ ir->p.name, ir->p.minor); ++ return -EOVERFLOW; ++ } ++ ++ if (ir->p.add_to_buf) { ++ int res = -ENODATA; ++ int got_data = 0; ++ ++ /* service the device as long as it is returning ++ * data and we have space ++ */ ++ while (!lirc_buffer_full(ir->buf)) { ++ res = ir->p.add_to_buf(ir->p.data, ir->buf); ++ if (res == SUCCESS) ++ got_data++; ++ else ++ break; ++ } ++ ++ if (res == -ENODEV) ++ kthread_stop(ir->task); ++ ++ 
return got_data ? SUCCESS : res; ++ } ++ ++ return SUCCESS; ++} ++ ++/* main function of the polling thread ++ */ ++static int lirc_thread(void *irctl) ++{ ++ struct irctl *ir = irctl; ++ ++ /* This thread doesn't need any user-level access, ++ * so get rid of all our resources ++ */ ++ ++ dprintk(LOGHEAD "poll thread started\n", ir->p.name, ir->p.minor); ++ ++ do { ++ if (ir->open) { ++ if (ir->jiffies_to_wait) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(ir->jiffies_to_wait); ++ } else { ++ interruptible_sleep_on( ++ ir->p.get_queue(ir->p.data)); ++ } ++ if (kthread_should_stop()) ++ break; ++ if (!add_to_buf(ir)) ++ wake_up_interruptible(&ir->buf->wait_poll); ++ } else { ++ /* if device not opened so we can sleep half a second */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(HZ/2); ++ } ++ } while (!kthread_should_stop()); ++ ++ dprintk(LOGHEAD "poll thread ended\n", ir->p.name, ir->p.minor); ++ ++ return 0; ++} ++ ++int lirc_register_plugin(struct lirc_plugin *p) ++{ ++ struct irctl *ir; ++ int minor; ++ int bytes_in_key; ++ int err; ++ DECLARE_COMPLETION(tn); ++ ++ if (!p) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "plugin pointer must be not NULL!\n"); ++ err = -EBADRQC; ++ goto out; ++ } ++ ++ if (MAX_IRCTL_DEVICES <= p->minor) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "\"minor\" must be between 0 and %d (%d)!\n", ++ MAX_IRCTL_DEVICES-1, p->minor); ++ err = -EBADRQC; ++ goto out; ++ } ++ ++ if (1 > p->code_length || (BUFLEN * 8) < p->code_length) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "code length in bits for minor (%d) " ++ "must be less than %d!\n", ++ p->minor, BUFLEN * 8); ++ err = -EBADRQC; ++ goto out; ++ } ++ ++ printk(KERN_INFO "lirc_dev: lirc_register_plugin: sample_rate: %d\n", ++ p->sample_rate); ++ if (p->sample_rate) { ++ if (2 > p->sample_rate || HZ < p->sample_rate) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "sample_rate must be between 2 
and %d!\n", HZ); ++ err = -EBADRQC; ++ goto out; ++ } ++ if (!p->add_to_buf) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "add_to_buf cannot be NULL when " ++ "sample_rate is set\n"); ++ err = -EBADRQC; ++ goto out; ++ } ++ } else if (!(p->fops && p->fops->read) ++ && !p->get_queue && !p->rbuf) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "fops->read, get_queue and rbuf " ++ "cannot all be NULL!\n"); ++ err = -EBADRQC; ++ goto out; ++ } else if (!p->get_queue && !p->rbuf) { ++ if (!(p->fops && p->fops->read && p->fops->poll) ++ || (!p->fops->ioctl && !p->ioctl)) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "neither read, poll nor ioctl can be NULL!\n"); ++ err = -EBADRQC; ++ goto out; ++ } ++ } ++ ++ if (p->owner == NULL) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "no module owner registered\n"); ++ err = -EBADRQC; ++ goto out; ++ } ++ ++ mutex_lock(&plugin_lock); ++ ++ minor = p->minor; ++ ++ if (0 > minor) { ++ /* find first free slot for plugin */ ++ for (minor = 0; minor < MAX_IRCTL_DEVICES; minor++) ++ if (irctls[minor].p.minor == NOPLUG) ++ break; ++ if (MAX_IRCTL_DEVICES == minor) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "no free slots for plugins!\n"); ++ err = -ENOMEM; ++ goto out_lock; ++ } ++ } else if (irctls[minor].p.minor != NOPLUG) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "minor (%d) just registered!\n", minor); ++ err = -EBUSY; ++ goto out_lock; ++ } ++ ++ ir = &irctls[minor]; ++ ++ if (p->sample_rate) { ++ ir->jiffies_to_wait = HZ / p->sample_rate; ++ } else { ++ /* it means - wait for external event in task queue */ ++ ir->jiffies_to_wait = 0; ++ } ++ ++ /* some safety check 8-) */ ++ p->name[sizeof(p->name)-1] = '\0'; ++ ++ bytes_in_key = p->code_length/8 + (p->code_length%8 ? 
1 : 0); ++ ++ if (p->rbuf) { ++ ir->buf = p->rbuf; ++ } else { ++ ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!ir->buf) { ++ err = -ENOMEM; ++ goto out_lock; ++ } ++ if (lirc_buffer_init(ir->buf, bytes_in_key, ++ BUFLEN/bytes_in_key) != 0) { ++ kfree(ir->buf); ++ err = -ENOMEM; ++ goto out_lock; ++ } ++ } ++ ++ if (p->features == 0) ++ p->features = (p->code_length > 8) ? ++ LIRC_CAN_REC_LIRCCODE : LIRC_CAN_REC_CODE; ++ ++ ir->p = *p; ++ ir->p.minor = minor; ++ ++ device_create(lirc_class, ir->p.dev, ++ MKDEV(IRCTL_DEV_MAJOR, ir->p.minor), NULL, ++ "lirc%u", ir->p.minor); ++ ++ if (p->sample_rate || p->get_queue) { ++ /* try to fire up polling thread */ ++ ir->task = kthread_run(lirc_thread, (void *)ir, "lirc_dev"); ++ if (IS_ERR(ir->task)) { ++ printk(KERN_ERR "lirc_dev: lirc_register_plugin: " ++ "cannot run poll thread for minor = %d\n", ++ p->minor); ++ err = -ECHILD; ++ goto out_sysfs; ++ } ++ } ++ ir->attached = 1; ++ mutex_unlock(&plugin_lock); ++ ++/* ++ * Recent kernels should handle this autmatically by increasing/decreasing ++ * use count when a dependant module is loaded/unloaded. 
++ */ ++ dprintk("lirc_dev: plugin %s registered at minor number = %d\n", ++ ir->p.name, ir->p.minor); ++ p->minor = minor; ++ return minor; ++ ++out_sysfs: ++ device_destroy(lirc_class, MKDEV(IRCTL_DEV_MAJOR, ir->p.minor)); ++out_lock: ++ mutex_unlock(&plugin_lock); ++out: ++ return err; ++} ++EXPORT_SYMBOL(lirc_register_plugin); ++ ++int lirc_unregister_plugin(int minor) ++{ ++ struct irctl *ir; ++ DECLARE_COMPLETION(tn); ++ DECLARE_COMPLETION(tn2); ++ ++ if (minor < 0 || minor >= MAX_IRCTL_DEVICES) { ++ printk(KERN_ERR "lirc_dev: lirc_unregister_plugin: " ++ "\"minor\" must be between 0 and %d!\n", ++ MAX_IRCTL_DEVICES-1); ++ return -EBADRQC; ++ } ++ ++ ir = &irctls[minor]; ++ ++ mutex_lock(&plugin_lock); ++ ++ if (ir->p.minor != minor) { ++ printk(KERN_ERR "lirc_dev: lirc_unregister_plugin: " ++ "minor (%d) device not registered!", minor); ++ mutex_unlock(&plugin_lock); ++ return -ENOENT; ++ } ++ ++ /* end up polling thread */ ++ if (ir->task) { ++ wake_up_process(ir->task); ++ kthread_stop(ir->task); ++ } ++ ++ dprintk("lirc_dev: plugin %s unregistered from minor number = %d\n", ++ ir->p.name, ir->p.minor); ++ ++ ir->attached = 0; ++ if (ir->open) { ++ dprintk(LOGHEAD "releasing opened plugin\n", ++ ir->p.name, ir->p.minor); ++ wake_up_interruptible(&ir->buf->wait_poll); ++ mutex_lock(&ir->buffer_lock); ++ ir->p.set_use_dec(ir->p.data); ++ module_put(ir->p.owner); ++ mutex_unlock(&ir->buffer_lock); ++ } else ++ cleanup(ir); ++ mutex_unlock(&plugin_lock); ++ ++/* ++ * Recent kernels should handle this autmatically by increasing/decreasing ++ * use count when a dependant module is loaded/unloaded. 
++ */ ++ ++ return SUCCESS; ++} ++EXPORT_SYMBOL(lirc_unregister_plugin); ++ ++/* ++ * ++ */ ++static int irctl_open(struct inode *inode, struct file *file) ++{ ++ struct irctl *ir; ++ int retval; ++ ++ if (MINOR(inode->i_rdev) >= MAX_IRCTL_DEVICES) { ++ dprintk("lirc_dev [%d]: open result = -ENODEV\n", ++ MINOR(inode->i_rdev)); ++ return -ENODEV; ++ } ++ ++ ir = &irctls[MINOR(inode->i_rdev)]; ++ ++ dprintk(LOGHEAD "open called\n", ir->p.name, ir->p.minor); ++ ++ /* if the plugin has an open function use it instead */ ++ if (ir->p.fops && ir->p.fops->open) ++ return ir->p.fops->open(inode, file); ++ ++ if (mutex_lock_interruptible(&plugin_lock)) ++ return -ERESTARTSYS; ++ ++ if (ir->p.minor == NOPLUG) { ++ mutex_unlock(&plugin_lock); ++ dprintk(LOGHEAD "open result = -ENODEV\n", ++ ir->p.name, ir->p.minor); ++ return -ENODEV; ++ } ++ ++ if (ir->open) { ++ mutex_unlock(&plugin_lock); ++ dprintk(LOGHEAD "open result = -EBUSY\n", ++ ir->p.name, ir->p.minor); ++ return -EBUSY; ++ } ++ ++ /* there is no need for locking here because ir->open is 0 ++ * and lirc_thread isn't using buffer ++ * plugins which use irq's should allocate them on set_use_inc, ++ * so there should be no problem with those either. 
++ */ ++ ir->buf->head = ir->buf->tail; ++ ir->buf->fill = 0; ++ ++ if (ir->p.owner != NULL && try_module_get(ir->p.owner)) { ++ ++ir->open; ++ retval = ir->p.set_use_inc(ir->p.data); ++ ++ if (retval != SUCCESS) { ++ module_put(ir->p.owner); ++ --ir->open; ++ } ++ } else { ++ if (ir->p.owner == NULL) ++ dprintk(LOGHEAD "no module owner!!!\n", ++ ir->p.name, ir->p.minor); ++ ++ retval = -ENODEV; ++ } ++ ++ dprintk(LOGHEAD "open result = %d\n", ir->p.name, ir->p.minor, retval); ++ mutex_unlock(&plugin_lock); ++ ++ return retval; ++} ++ ++/* ++ * ++ */ ++static int irctl_close(struct inode *inode, struct file *file) ++{ ++ struct irctl *ir = &irctls[MINOR(inode->i_rdev)]; ++ ++ dprintk(LOGHEAD "close called\n", ir->p.name, ir->p.minor); ++ ++ /* if the plugin has a close function use it instead */ ++ if (ir->p.fops && ir->p.fops->release) ++ return ir->p.fops->release(inode, file); ++ ++ if (mutex_lock_interruptible(&plugin_lock)) ++ return -ERESTARTSYS; ++ ++ --ir->open; ++ if (ir->attached) { ++ ir->p.set_use_dec(ir->p.data); ++ module_put(ir->p.owner); ++ } else { ++ cleanup(ir); ++ } ++ ++ mutex_unlock(&plugin_lock); ++ ++ return SUCCESS; ++} ++ ++/* ++ * ++ */ ++static unsigned int irctl_poll(struct file *file, poll_table *wait) ++{ ++ struct irctl *ir = &irctls[MINOR(file->f_dentry->d_inode->i_rdev)]; ++ unsigned int ret; ++ ++ dprintk(LOGHEAD "poll called\n", ir->p.name, ir->p.minor); ++ ++ /* if the plugin has a poll function use it instead */ ++ if (ir->p.fops && ir->p.fops->poll) ++ return ir->p.fops->poll(file, wait); ++ ++ mutex_lock(&ir->buffer_lock); ++ if (!ir->attached) { ++ mutex_unlock(&ir->buffer_lock); ++ return POLLERR; ++ } ++ ++ poll_wait(file, &ir->buf->wait_poll, wait); ++ ++ dprintk(LOGHEAD "poll result = %s\n", ++ ir->p.name, ir->p.minor, ++ lirc_buffer_empty(ir->buf) ? "0" : "POLLIN|POLLRDNORM"); ++ ++ ret = lirc_buffer_empty(ir->buf) ? 
0 : (POLLIN|POLLRDNORM); ++ ++ mutex_unlock(&ir->buffer_lock); ++ return ret; ++} ++ ++/* ++ * ++ */ ++static int irctl_ioctl(struct inode *inode, struct file *file, ++ unsigned int cmd, unsigned long arg) ++{ ++ unsigned long mode; ++ int result; ++ struct irctl *ir = &irctls[MINOR(inode->i_rdev)]; ++ ++ dprintk(LOGHEAD "ioctl called (0x%x)\n", ++ ir->p.name, ir->p.minor, cmd); ++ ++ /* if the plugin has a ioctl function use it instead */ ++ if (ir->p.fops && ir->p.fops->ioctl) ++ return ir->p.fops->ioctl(inode, file, cmd, arg); ++ ++ if (ir->p.minor == NOPLUG || !ir->attached) { ++ dprintk(LOGHEAD "ioctl result = -ENODEV\n", ++ ir->p.name, ir->p.minor); ++ return -ENODEV; ++ } ++ ++ /* Give the plugin a chance to handle the ioctl */ ++ if (ir->p.ioctl) { ++ result = ir->p.ioctl(inode, file, cmd, arg); ++ if (result != -ENOIOCTLCMD) ++ return result; ++ } ++ /* The plugin can't handle cmd */ ++ result = SUCCESS; ++ ++ switch (cmd) { ++ case LIRC_GET_FEATURES: ++ result = put_user(ir->p.features, (unsigned long *)arg); ++ break; ++ case LIRC_GET_REC_MODE: ++ if (!(ir->p.features&LIRC_CAN_REC_MASK)) ++ return -ENOSYS; ++ ++ result = put_user(LIRC_REC2MODE ++ (ir->p.features&LIRC_CAN_REC_MASK), ++ (unsigned long *)arg); ++ break; ++ case LIRC_SET_REC_MODE: ++ if (!(ir->p.features&LIRC_CAN_REC_MASK)) ++ return -ENOSYS; ++ ++ result = get_user(mode, (unsigned long *)arg); ++ if (!result && !(LIRC_MODE2REC(mode) & ir->p.features)) ++ result = -EINVAL; ++ /* ++ * FIXME: We should actually set the mode somehow but ++ * for now, lirc_serial doesn't support mode changing either ++ */ ++ break; ++ case LIRC_GET_LENGTH: ++ result = put_user((unsigned long)ir->p.code_length, ++ (unsigned long *)arg); ++ break; ++ default: ++ result = -ENOIOCTLCMD; ++ } ++ ++ dprintk(LOGHEAD "ioctl result = %d\n", ++ ir->p.name, ir->p.minor, result); ++ ++ return result; ++} ++ ++/* ++ * ++ */ ++static ssize_t irctl_read(struct file *file, ++ char *buffer, ++ size_t length, ++ loff_t *ppos) ++{ 
++ struct irctl *ir = &irctls[MINOR(file->f_dentry->d_inode->i_rdev)]; ++ unsigned char buf[ir->buf->chunk_size]; ++ int ret = 0, written = 0; ++ DECLARE_WAITQUEUE(wait, current); ++ ++ dprintk(LOGHEAD "read called\n", ir->p.name, ir->p.minor); ++ ++ /* if the plugin has a specific read function use it instead */ ++ if (ir->p.fops && ir->p.fops->read) ++ return ir->p.fops->read(file, buffer, length, ppos); ++ ++ if (mutex_lock_interruptible(&ir->buffer_lock)) ++ return -ERESTARTSYS; ++ if (!ir->attached) { ++ mutex_unlock(&ir->buffer_lock); ++ return -ENODEV; ++ } ++ ++ if (length % ir->buf->chunk_size) { ++ dprintk(LOGHEAD "read result = -EINVAL\n", ++ ir->p.name, ir->p.minor); ++ mutex_unlock(&ir->buffer_lock); ++ return -EINVAL; ++ } ++ ++ /* ++ * we add ourselves to the task queue before buffer check ++ * to avoid losing scan code (in case when queue is awaken somewhere ++ * beetwen while condition checking and scheduling) ++ */ ++ add_wait_queue(&ir->buf->wait_poll, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ /* ++ * while we did't provide 'length' bytes, device is opened in blocking ++ * mode and 'copy_to_user' is happy, wait for data. 
++ */ ++ while (written < length && ret == 0) { ++ if (lirc_buffer_empty(ir->buf)) { ++ /* According to the read(2) man page, 'written' can be ++ * returned as less than 'length', instead of blocking ++ * again, returning -EWOULDBLOCK, or returning ++ * -ERESTARTSYS */ ++ if (written) ++ break; ++ if (file->f_flags & O_NONBLOCK) { ++ ret = -EWOULDBLOCK; ++ break; ++ } ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (!ir->attached) { ++ ret = -ENODEV; ++ break; ++ } ++ } else { ++ lirc_buffer_read_1(ir->buf, buf); ++ ret = copy_to_user((void *)buffer+written, buf, ++ ir->buf->chunk_size); ++ written += ir->buf->chunk_size; ++ } ++ } ++ ++ remove_wait_queue(&ir->buf->wait_poll, &wait); ++ set_current_state(TASK_RUNNING); ++ mutex_unlock(&ir->buffer_lock); ++ ++ dprintk(LOGHEAD "read result = %s (%d)\n", ++ ir->p.name, ir->p.minor, ret ? "-EFAULT" : "OK", ret); ++ ++ return ret ? ret : written; ++} ++ ++ ++void *lirc_get_pdata(struct file *file) ++{ ++ void *data = NULL; ++ ++ if (file && file->f_dentry && file->f_dentry->d_inode && ++ file->f_dentry->d_inode->i_rdev) { ++ struct irctl *ir; ++ ir = &irctls[MINOR(file->f_dentry->d_inode->i_rdev)]; ++ data = ir->p.data; ++ } ++ ++ return data; ++} ++EXPORT_SYMBOL(lirc_get_pdata); ++ ++ ++static ssize_t irctl_write(struct file *file, const char *buffer, ++ size_t length, loff_t *ppos) ++{ ++ struct irctl *ir = &irctls[MINOR(file->f_dentry->d_inode->i_rdev)]; ++ ++ dprintk(LOGHEAD "write called\n", ir->p.name, ir->p.minor); ++ ++ /* if the plugin has a specific read function use it instead */ ++ if (ir->p.fops && ir->p.fops->write) ++ return ir->p.fops->write(file, buffer, length, ppos); ++ ++ if (!ir->attached) ++ return -ENODEV; ++ ++ return -EINVAL; ++} ++ ++ ++static struct file_operations fops = { ++ .read = irctl_read, ++ .write = irctl_write, ++ .poll = irctl_poll, ++ .ioctl = irctl_ioctl, ++ .open = irctl_open, ++ .release = 
irctl_close ++}; ++ ++ ++static int lirc_dev_init(void) ++{ ++ int i; ++ ++ for (i = 0; i < MAX_IRCTL_DEVICES; ++i) ++ init_irctl(&irctls[i]); ++ ++ if (register_chrdev(IRCTL_DEV_MAJOR, IRCTL_DEV_NAME, &fops)) { ++ printk(KERN_ERR "lirc_dev: register_chrdev failed\n"); ++ goto out; ++ } ++ ++ lirc_class = class_create(THIS_MODULE, "lirc"); ++ if (IS_ERR(lirc_class)) { ++ printk(KERN_ERR "lirc_dev: class_create failed\n"); ++ goto out_unregister; ++ } ++ ++ printk(KERN_INFO "lirc_dev: IR Remote Control driver registered, " ++ "major %d \n", IRCTL_DEV_MAJOR); ++ ++ return SUCCESS; ++ ++out_unregister: ++ /* unregister_chrdev returns void now */ ++ unregister_chrdev(IRCTL_DEV_MAJOR, IRCTL_DEV_NAME); ++out: ++ return -1; ++} ++ ++/* ---------------------------------------------------------------------- */ ++ ++#ifdef MODULE ++ ++/* ++ * ++ */ ++int init_module(void) ++{ ++ return lirc_dev_init(); ++} ++ ++/* ++ * ++ */ ++void cleanup_module(void) ++{ ++ /* unregister_chrdev returns void now */ ++ unregister_chrdev(IRCTL_DEV_MAJOR, IRCTL_DEV_NAME); ++ class_destroy(lirc_class); ++ dprintk("lirc_dev: module unloaded\n"); ++} ++ ++MODULE_DESCRIPTION("LIRC base driver module"); ++MODULE_AUTHOR("Artur Lipowski"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS_CHARDEV_MAJOR(IRCTL_DEV_MAJOR); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++#else /* not a MODULE */ ++subsys_initcall(lirc_dev_init); ++ ++#endif /* MODULE */ ++ ++/* ++ * Overrides for Emacs so that we follow Linus's tabbing style. 
++ * --------------------------------------------------------------------------- ++ * Local variables: ++ * c-basic-offset: 8 ++ * End: ++ */ +diff --git a/drivers/input/lirc/lirc_dev.h b/drivers/input/lirc/lirc_dev.h +new file mode 100644 +index 0000000..920dd43 +--- /dev/null ++++ b/drivers/input/lirc/lirc_dev.h +@@ -0,0 +1,262 @@ ++/* ++ * LIRC base driver ++ * ++ * (L) by Artur Lipowski ++ * This code is licensed under GNU GPL ++ * ++ */ ++ ++#ifndef _LINUX_LIRC_DEV_H ++#define _LINUX_LIRC_DEV_H ++ ++#define MAX_IRCTL_DEVICES 4 ++#define BUFLEN 16 ++ ++/* #define LIRC_BUFF_POWER_OF_2 */ ++#ifdef LIRC_BUFF_POWER_OF_2 ++#define mod(n, div) ((n) & ((div) - 1)) ++#else ++#define mod(n, div) ((n) % (div)) ++#endif ++#include ++#include ++ ++struct lirc_buffer { ++ wait_queue_head_t wait_poll; ++ spinlock_t lock; ++ ++ unsigned char *data; ++ unsigned int chunk_size; ++ unsigned int size; /* in chunks */ ++ unsigned int fill; /* in chunks */ ++ int head, tail; /* in chunks */ ++ /* Using chunks instead of bytes pretends to simplify boundary checking ++ * And should allow for some performance fine tunning later */ ++}; ++static inline void _lirc_buffer_clear(struct lirc_buffer *buf) ++{ ++ buf->head = 0; ++ buf->tail = 0; ++ buf->fill = 0; ++} ++static inline int lirc_buffer_init(struct lirc_buffer *buf, ++ unsigned int chunk_size, ++ unsigned int size) ++{ ++ /* Adjusting size to the next power of 2 would allow for ++ * inconditional LIRC_BUFF_POWER_OF_2 optimization */ ++ init_waitqueue_head(&buf->wait_poll); ++ spin_lock_init(&buf->lock); ++ _lirc_buffer_clear(buf); ++ buf->chunk_size = chunk_size; ++ buf->size = size; ++ buf->data = kmalloc(size*chunk_size, GFP_KERNEL); ++ if (buf->data == NULL) ++ return -1; ++ memset(buf->data, 0, size*chunk_size); ++ return 0; ++} ++static inline void lirc_buffer_free(struct lirc_buffer *buf) ++{ ++ kfree(buf->data); ++ buf->data = NULL; ++ buf->head = 0; ++ buf->tail = 0; ++ buf->fill = 0; ++ buf->chunk_size = 0; ++ buf->size 
= 0; ++} ++static inline int lirc_buffer_full(struct lirc_buffer *buf) ++{ ++ return buf->fill >= buf->size; ++} ++static inline int lirc_buffer_empty(struct lirc_buffer *buf) ++{ ++ return !(buf->fill); ++} ++static inline int lirc_buffer_available(struct lirc_buffer *buf) ++{ ++ return buf->size - buf->fill; ++} ++static inline void lirc_buffer_lock(struct lirc_buffer *buf, ++ unsigned long *flags) ++{ ++ spin_lock_irqsave(&buf->lock, *flags); ++} ++static inline void lirc_buffer_unlock(struct lirc_buffer *buf, ++ unsigned long *flags) ++{ ++ spin_unlock_irqrestore(&buf->lock, *flags); ++} ++static inline void lirc_buffer_clear(struct lirc_buffer *buf) ++{ ++ unsigned long flags; ++ lirc_buffer_lock(buf, &flags); ++ _lirc_buffer_clear(buf); ++ lirc_buffer_unlock(buf, &flags); ++} ++static inline void _lirc_buffer_remove_1(struct lirc_buffer *buf) ++{ ++ buf->head = mod(buf->head+1, buf->size); ++ buf->fill -= 1; ++} ++static inline void lirc_buffer_remove_1(struct lirc_buffer *buf) ++{ ++ unsigned long flags; ++ lirc_buffer_lock(buf, &flags); ++ _lirc_buffer_remove_1(buf); ++ lirc_buffer_unlock(buf, &flags); ++} ++static inline void _lirc_buffer_read_1(struct lirc_buffer *buf, ++ unsigned char *dest) ++{ ++ memcpy(dest, &buf->data[buf->head*buf->chunk_size], buf->chunk_size); ++ buf->head = mod(buf->head+1, buf->size); ++ buf->fill -= 1; ++} ++static inline void lirc_buffer_read_1(struct lirc_buffer *buf, ++ unsigned char *dest) ++{ ++ unsigned long flags; ++ lirc_buffer_lock(buf, &flags); ++ _lirc_buffer_read_1(buf, dest); ++ lirc_buffer_unlock(buf, &flags); ++} ++static inline void _lirc_buffer_write_1(struct lirc_buffer *buf, ++ unsigned char *orig) ++{ ++ memcpy(&buf->data[buf->tail*buf->chunk_size], orig, buf->chunk_size); ++ buf->tail = mod(buf->tail+1, buf->size); ++ buf->fill++; ++} ++static inline void lirc_buffer_write_1(struct lirc_buffer *buf, ++ unsigned char *orig) ++{ ++ unsigned long flags; ++ lirc_buffer_lock(buf, &flags); ++ 
_lirc_buffer_write_1(buf, orig); ++ lirc_buffer_unlock(buf, &flags); ++} ++static inline void _lirc_buffer_write_n(struct lirc_buffer *buf, ++ unsigned char *orig, int count) ++{ ++ memcpy(&buf->data[buf->tail * buf->chunk_size], orig, ++ count * buf->chunk_size); ++ buf->tail = mod(buf->tail + count, buf->size); ++ buf->fill += count; ++} ++static inline void lirc_buffer_write_n(struct lirc_buffer *buf, ++ unsigned char *orig, int count) ++{ ++ unsigned long flags; ++ int space1; ++ ++ lirc_buffer_lock(buf, &flags); ++ if (buf->head > buf->tail) ++ space1 = buf->head - buf->tail; ++ else ++ space1 = buf->size - buf->tail; ++ ++ if (count > space1) { ++ _lirc_buffer_write_n(buf, orig, space1); ++ _lirc_buffer_write_n(buf, orig+(space1*buf->chunk_size), ++ count-space1); ++ } else { ++ _lirc_buffer_write_n(buf, orig, count); ++ } ++ lirc_buffer_unlock(buf, &flags); ++} ++ ++struct lirc_plugin { ++ char name[40]; ++ int minor; ++ int code_length; ++ int sample_rate; ++ unsigned long features; ++ void *data; ++ int (*add_to_buf) (void *data, struct lirc_buffer *buf); ++ wait_queue_head_t* (*get_queue) (void *data); ++ struct lirc_buffer *rbuf; ++ int (*set_use_inc) (void *data); ++ void (*set_use_dec) (void *data); ++ int (*ioctl) (struct inode *, struct file *, unsigned int, ++ unsigned long); ++ struct file_operations *fops; ++ struct device *dev; ++ struct module *owner; ++}; ++/* name: ++ * this string will be used for logs ++ * ++ * minor: ++ * indicates minor device (/dev/lirc) number for registered plugin ++ * if caller fills it with negative value, then the first free minor ++ * number will be used (if available) ++ * ++ * code_length: ++ * length of the remote control key code expressed in bits ++ * ++ * sample_rate: ++ * sample_rate equal to 0 means that no polling will be performed and ++ * add_to_buf will be triggered by external events (through task queue ++ * returned by get_queue) ++ * ++ * data: ++ * it may point to any plugin data and this pointer 
will be passed to ++ * all callback functions ++ * ++ * add_to_buf: ++ * add_to_buf will be called after specified period of the time or ++ * triggered by the external event, this behavior depends on value of ++ * the sample_rate this function will be called in user context. This ++ * routine should return 0 if data was added to the buffer and ++ * -ENODATA if none was available. This should add some number of bits ++ * evenly divisible by code_length to the buffer ++ * ++ * get_queue: ++ * this callback should return a pointer to the task queue which will ++ * be used for external event waiting ++ * ++ * rbuf: ++ * if not NULL, it will be used as a read buffer, you will have to ++ * write to the buffer by other means, like irq's (see also ++ * lirc_serial.c). ++ * ++ * set_use_inc: ++ * set_use_inc will be called after device is opened ++ * ++ * set_use_dec: ++ * set_use_dec will be called after device is closed ++ * ++ * ioctl: ++ * Some ioctl's can be directly handled by lirc_dev but will be ++ * forwared here if not NULL and only handled if it returns ++ * -ENOIOCTLCMD (see also lirc_serial.c). ++ * ++ * fops: ++ * file_operations for drivers which don't fit the current plugin model. ++ * ++ * owner: ++ * the module owning this struct ++ * ++ */ ++ ++ ++/* following functions can be called ONLY from user context ++ * ++ * returns negative value on error or minor number ++ * of the registered device if success ++ * contens of the structure pointed by p is copied ++ */ ++extern int lirc_register_plugin(struct lirc_plugin *p); ++ ++/* returns negative value on error or 0 if success ++*/ ++extern int lirc_unregister_plugin(int minor); ++ ++/* Returns the private data stored in the lirc_plugin ++ * associated with the given device file pointer. 
++ */ ++void *lirc_get_pdata(struct file *file); ++ ++#endif +diff --git a/drivers/input/lirc/lirc_i2c.c b/drivers/input/lirc/lirc_i2c.c +new file mode 100644 +index 0000000..4714641 +--- /dev/null ++++ b/drivers/input/lirc/lirc_i2c.c +@@ -0,0 +1,639 @@ ++/* ++ * i2c IR lirc plugin for Hauppauge and Pixelview cards - new 2.3.x i2c stack ++ * ++ * Copyright (c) 2000 Gerd Knorr ++ * modified for PixelView (BT878P+W/FM) by ++ * Michal Kochanowicz ++ * Christoph Bartelmus ++ * modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by ++ * Ulrich Mueller ++ * modified for Asus TV-Box and Creative/VisionTek BreakOut-Box by ++ * Stefan Jahn ++ * modified for inclusion into kernel sources by ++ * Jerome Brock ++ * modified for Leadtek Winfast PVR2000 by ++ * Thomas Reitmayr (treitmayr@yahoo.com) ++ * modified for Hauppauge HVR-1300 by ++ * Jan Frey (jfrey@gmx.de) ++ * ++ * parts are cut&pasted from the old lirc_haup.c driver ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "lirc_dev.h" ++ ++struct IR { ++ struct lirc_plugin l; ++ struct i2c_client c; ++ int nextkey; ++ unsigned char b[3]; ++ unsigned char bits; ++ unsigned char flag; ++}; ++ ++/* ----------------------------------------------------------------------- */ ++ ++#define DEVICE_NAME "lirc_i2c" ++ ++/* ----------------------------------------------------------------------- */ ++/* insmod parameters */ ++ ++static int debug; /* debug output */ ++static int minor = -1; /* minor number */ ++ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG DEVICE_NAME ": " fmt, \ ++ ## args); \ ++ } while (0) ++ ++/* ----------------------------------------------------------------------- */ ++ ++static inline int reverse(int data, int bits) ++{ ++ int i; ++ int c; ++ ++ for (c = 0, i = 0; i < bits; i++) ++ c |= ((data & (1<c, keybuf, 1); ++ /* poll IR chip */ ++ if (i2c_master_recv(&ir->c, keybuf, sizeof(keybuf)) != sizeof(keybuf)) { ++ dprintk("read error\n"); ++ return -EIO; ++ } ++ ++ dprintk("key (0x%02x%02x%02x%02x)\n", ++ keybuf[0], keybuf[1], keybuf[2], keybuf[3]); ++ ++ /* key pressed ? 
*/ ++ if (keybuf[2] == 0xff) ++ return -ENODATA; ++ ++ /* remove repeat bit */ ++ keybuf[2] &= 0x7f; ++ keybuf[3] |= 0x80; ++ ++ lirc_buffer_write_1(buf, keybuf); ++ return 0; ++} ++ ++static int add_to_buf_pcf8574(void *data, struct lirc_buffer *buf) ++{ ++ struct IR *ir = data; ++ int rc; ++ unsigned char all, mask; ++ unsigned char key; ++ ++ /* compute all valid bits (key code + pressed/release flag) */ ++ all = ir->bits | ir->flag; ++ ++ /* save IR writable mask bits */ ++ mask = i2c_smbus_read_byte(&ir->c) & ~all; ++ ++ /* send bit mask */ ++ rc = i2c_smbus_write_byte(&ir->c, (0xff & all) | mask); ++ ++ /* receive scan code */ ++ rc = i2c_smbus_read_byte(&ir->c); ++ ++ if (rc == -1) { ++ dprintk("%s read error\n", ir->c.name); ++ return -EIO; ++ } ++ ++ /* drop duplicate polls */ ++ if (ir->b[0] == (rc & all)) ++ return -ENODATA; ++ ++ ir->b[0] = rc & all; ++ ++ dprintk("%s key 0x%02X %s\n", ir->c.name, rc & ir->bits, ++ (rc & ir->flag) ? "released" : "pressed"); ++ ++ if (rc & ir->flag) { ++ /* ignore released buttons */ ++ return -ENODATA; ++ } ++ ++ /* set valid key code */ ++ key = rc & ir->bits; ++ lirc_buffer_write_1(buf, &key); ++ return 0; ++} ++ ++/* common for Hauppauge IR receivers */ ++static int add_to_buf_haup_common(void *data, struct lirc_buffer *buf, ++ unsigned char *keybuf, int size, int offset) ++{ ++ struct IR *ir = data; ++ __u16 code; ++ unsigned char codes[2]; ++ ++ /* poll IR chip */ ++ if (size == i2c_master_recv(&ir->c, keybuf, size)) { ++ ir->b[0] = keybuf[offset]; ++ ir->b[1] = keybuf[offset+1]; ++ ir->b[2] = keybuf[offset+2]; ++ dprintk("key (0x%02x/0x%02x)\n", ir->b[0], ir->b[1]); ++ } else { ++ dprintk("read error\n"); ++ /* keep last successfull read buffer */ ++ } ++ ++ /* key pressed ? 
*/ ++ if ((ir->b[0] & 0x80) == 0) ++ return -ENODATA; ++ ++ /* look what we have */ ++ code = (((__u16)ir->b[0]&0x7f)<<6) | (ir->b[1]>>2); ++ ++ codes[0] = (code >> 8) & 0xff; ++ codes[1] = code & 0xff; ++ ++ /* return it */ ++ lirc_buffer_write_1(buf, codes); ++ return 0; ++} ++ ++/* specific for the Hauppauge PVR150 IR receiver */ ++static int add_to_buf_haup_pvr150(void *data, struct lirc_buffer *buf) ++{ ++ unsigned char keybuf[6]; ++ /* fetch 6 bytes, first relevant is at offset 3 */ ++ return add_to_buf_haup_common(data, buf, keybuf, 6, 3); ++} ++ ++/* used for all Hauppauge IR receivers but the PVR150 */ ++static int add_to_buf_haup(void *data, struct lirc_buffer *buf) ++{ ++ unsigned char keybuf[3]; ++ /* fetch 3 bytes, first relevant is at offset 0 */ ++ return add_to_buf_haup_common(data, buf, keybuf, 3, 0); ++} ++ ++ ++static int add_to_buf_pvr2000(void *data, struct lirc_buffer *buf) ++{ ++ struct IR *ir = data; ++ unsigned char key; ++ s32 flags; ++ s32 code; ++ ++ /* poll IR chip */ ++ flags = i2c_smbus_read_byte_data(&ir->c, 0x10); ++ if (-1 == flags) { ++ dprintk("read error\n"); ++ return -ENODATA; ++ } ++ /* key pressed ? 
*/ ++ if (0 == (flags & 0x80)) ++ return -ENODATA; ++ ++ /* read actual key code */ ++ code = i2c_smbus_read_byte_data(&ir->c, 0x00); ++ if (-1 == code) { ++ dprintk("read error\n"); ++ return -ENODATA; ++ } ++ ++ key = code & 0xFF; ++ ++ dprintk("IR Key/Flags: (0x%02x/0x%02x)\n", key, flags & 0xFF); ++ ++ /* return it */ ++ lirc_buffer_write_1(buf, &key); ++ return 0; ++} ++ ++static int add_to_buf_pixelview(void *data, struct lirc_buffer *buf) ++{ ++ struct IR *ir = data; ++ unsigned char key; ++ ++ /* poll IR chip */ ++ if (1 != i2c_master_recv(&ir->c, &key, 1)) { ++ dprintk("read error\n"); ++ return -1; ++ } ++ dprintk("key %02x\n", key); ++ ++ /* return it */ ++ lirc_buffer_write_1(buf, &key); ++ return 0; ++} ++ ++static int add_to_buf_pv951(void *data, struct lirc_buffer *buf) ++{ ++ struct IR *ir = data; ++ unsigned char key; ++ unsigned char codes[4]; ++ ++ /* poll IR chip */ ++ if (1 != i2c_master_recv(&ir->c, &key, 1)) { ++ dprintk("read error\n"); ++ return -ENODATA; ++ } ++ /* ignore 0xaa */ ++ if (key == 0xaa) ++ return -ENODATA; ++ dprintk("key %02x\n", key); ++ ++ codes[0] = 0x61; ++ codes[1] = 0xD6; ++ codes[2] = reverse(key, 8); ++ codes[3] = (~codes[2])&0xff; ++ ++ lirc_buffer_write_1(buf, codes); ++ return 0; ++} ++ ++static int add_to_buf_knc1(void *data, struct lirc_buffer *buf) ++{ ++ static unsigned char last_key = 0xFF; ++ struct IR *ir = data; ++ unsigned char key; ++ ++ /* poll IR chip */ ++ if (1 != i2c_master_recv(&ir->c, &key, 1)) { ++ dprintk("read error\n"); ++ return -ENODATA; ++ } ++ ++ /* it seems that 0xFE indicates that a button is still hold ++ down, while 0xFF indicates that no button is hold ++ down. 
0xFE sequences are sometimes interrupted by 0xFF */ ++ ++ dprintk("key %02x\n", key); ++ ++ if (key == 0xFF) ++ return -ENODATA; ++ ++ if (key == 0xFE) ++ key = last_key; ++ ++ last_key = key; ++ lirc_buffer_write_1(buf, &key); ++ ++ return 0; ++} ++ ++static int set_use_inc(void *data) ++{ ++ struct IR *ir = data; ++ ++ /* lock bttv in memory while /dev/lirc is in use */ ++ i2c_use_client(&ir->c); ++ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++ struct IR *ir = data; ++ ++ i2c_release_client(&ir->c); ++} ++ ++static struct lirc_plugin lirc_template = { ++ .name = "lirc_i2c", ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++/* ----------------------------------------------------------------------- */ ++ ++static int ir_attach(struct i2c_adapter *adap, int addr, ++ unsigned short flags, int kind); ++static int ir_detach(struct i2c_client *client); ++static int ir_probe(struct i2c_adapter *adap); ++static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg); ++ ++static struct i2c_driver driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "i2c ir driver", ++ }, ++ .attach_adapter = ir_probe, ++ .detach_client = ir_detach, ++ .command = ir_command, ++}; ++ ++static struct i2c_client client_template = { ++ .name = "unset", ++ .driver = &driver ++}; ++ ++static int ir_attach(struct i2c_adapter *adap, int addr, ++ unsigned short flags, int kind) ++{ ++ struct IR *ir; ++ int err; ++ ++ client_template.adapter = adap; ++ client_template.addr = addr; ++ ++ ir = kmalloc(sizeof(struct IR), GFP_KERNEL); ++ if (!ir) ++ return -ENOMEM; ++ memcpy(&ir->l, &lirc_template, sizeof(struct lirc_plugin)); ++ memcpy(&ir->c, &client_template, sizeof(struct i2c_client)); ++ ++ ir->c.adapter = adap; ++ ir->c.addr = addr; ++ i2c_set_clientdata(&ir->c, ir); ++ ir->l.data = ir; ++ ir->l.minor = minor; ++ ir->l.sample_rate = 10; ++ ir->nextkey = -1; ++ ++ switch (addr) { ++ case 0x64: 
++ strlcpy(ir->c.name, "Pixelview IR", I2C_NAME_SIZE); ++ ir->l.code_length = 8; ++ ir->l.add_to_buf = add_to_buf_pixelview; ++ break; ++ case 0x4b: ++ strlcpy(ir->c.name, "PV951 IR", I2C_NAME_SIZE); ++ ir->l.code_length = 32; ++ ir->l.add_to_buf = add_to_buf_pv951; ++ break; ++ case 0x71: ++ if (adap->id == I2C_HW_B_BT848 || ++ adap->id == I2C_HW_B_CX2341X) { ++ /* The PVR150 IR receiver uses the same protocol as ++ * other Hauppauge cards, but the data flow is ++ * different, so we need to deal with it by its own. */ ++ strlcpy(ir->c.name, "Hauppauge PVR150", I2C_NAME_SIZE); ++ } else /* I2C_HW_B_CX2388x */ ++ strlcpy(ir->c.name, "Hauppauge HVR1300", I2C_NAME_SIZE); ++ ir->l.code_length = 13; ++ ir->l.add_to_buf = add_to_buf_haup_pvr150; ++ break; ++ case 0x6b: ++ strlcpy(ir->c.name, "Adaptec IR", I2C_NAME_SIZE); ++ ir->l.code_length = 32; ++ ir->l.add_to_buf = add_to_buf_adap; ++ break; ++ case 0x18: ++ case 0x1a: ++ if (adap->id == I2C_HW_B_BT848 || ++ adap->id == I2C_HW_B_CX2341X) { ++ strlcpy(ir->c.name, "Hauppauge IR", I2C_NAME_SIZE); ++ ir->l.code_length = 13; ++ ir->l.add_to_buf = add_to_buf_haup; ++ } else { /* I2C_HW_B_CX2388x */ ++ strlcpy(ir->c.name, "Leadtek IR", I2C_NAME_SIZE); ++ ir->l.code_length = 8; ++ ir->l.add_to_buf = add_to_buf_pvr2000; ++ } ++ break; ++ case 0x30: ++ strlcpy(ir->c.name, "KNC ONE IR", I2C_NAME_SIZE); ++ ir->l.code_length = 8; ++ ir->l.add_to_buf = add_to_buf_knc1; ++ break; ++ case 0x21: ++ case 0x23: ++ strlcpy(ir->c.name, "TV-Box IR", I2C_NAME_SIZE); ++ ir->l.code_length = 8; ++ ir->l.add_to_buf = add_to_buf_pcf8574; ++ ir->bits = flags & 0xff; ++ ir->flag = (flags >> 8) & 0xff; ++ break; ++ default: ++ /* shouldn't happen */ ++ printk("lirc_i2c: Huh? 
unknown i2c address (0x%02x)?\n", addr); ++ kfree(ir); ++ return -1; ++ } ++ printk(KERN_INFO "lirc_i2c: chip 0x%x found @ 0x%02x (%s)\n", ++ adap->id, addr, ir->c.name); ++ ++ /* register device */ ++ err = i2c_attach_client(&ir->c); ++ if (err) { ++ kfree(ir); ++ return err; ++ } ++ ir->l.minor = lirc_register_plugin(&ir->l); ++ ++ return 0; ++} ++ ++static int ir_detach(struct i2c_client *client) ++{ ++ struct IR *ir = i2c_get_clientdata(client); ++ ++ /* unregister device */ ++ lirc_unregister_plugin(ir->l.minor); ++ i2c_detach_client(&ir->c); ++ ++ /* free memory */ ++ kfree(ir); ++ return 0; ++} ++ ++static int ir_probe(struct i2c_adapter *adap) ++{ ++ /* The external IR receiver is at i2c address 0x34 (0x35 for ++ * reads). Future Hauppauge cards will have an internal ++ * receiver at 0x30 (0x31 for reads). In theory, both can be ++ * fitted, and Hauppauge suggest an external overrides an ++ * internal. ++ * ++ * That's why we probe 0x1a (~0x34) first. CB ++ * ++ * The i2c address for the Hauppauge PVR-150 card is 0xe2, ++ * so we need to probe 0x71 as well. */ ++ ++ static const int probe[] = { ++ 0x1a, /* Hauppauge IR external */ ++ 0x18, /* Hauppauge IR internal */ ++ 0x71, /* Hauppauge IR (PVR150) */ ++ 0x4b, /* PV951 IR */ ++ 0x64, /* Pixelview IR */ ++ 0x30, /* KNC ONE IR */ ++ 0x6b, /* Adaptec IR */ ++ -1}; ++ ++ static const int probe_cx88[] = { ++ 0x18, /* Leadtek Winfast PVR2000 */ ++ 0x71, /* Hauppauge HVR-IR */ ++ -1}; ++ ++ struct i2c_client c; ++ char buf; ++ int i, rc; ++ ++ if (adap->id == I2C_HW_B_BT848 || ++ adap->id == I2C_HW_B_CX2341X) { ++ memset(&c, 0, sizeof(c)); ++ c.adapter = adap; ++ for (i = 0; -1 != probe[i]; i++) { ++ c.addr = probe[i]; ++ rc = i2c_master_recv(&c, &buf, 1); ++ dprintk("probe 0x%02x @ %s: %s\n", ++ probe[i], adap->name, ++ (1 == rc) ? 
"yes" : "no"); ++ if (1 == rc) ++ ir_attach(adap, probe[i], 0, 0); ++ } ++ } ++ ++ /* Leadtek Winfast PVR2000 or Hauppauge HVR-1300 */ ++ else if (adap->id == I2C_HW_B_CX2388x) { ++ memset(&c, 0, sizeof(c)); ++ c.adapter = adap; ++ for (i = 0; -1 != probe_cx88[i]; i++) { ++ c.addr = probe_cx88[i]; ++ rc = i2c_master_recv(&c, &buf, 1); ++ dprintk("probe 0x%02x @ %s: %s\n", ++ c.addr, adap->name, ++ (1 == rc) ? "yes" : "no"); ++ if (1 == rc) ++ ir_attach(adap, c.addr, 0, 0); ++ } ++ } ++ ++ /* Asus TV-Box and Creative/VisionTek BreakOut-Box (PCF8574) */ ++ else if (adap->id == I2C_HW_B_RIVA) { ++ /* addresses to probe; ++ leave 0x24 and 0x25 because SAA7113H possibly uses it ++ 0x21 and 0x22 possibly used by SAA7108E ++ Asus: 0x21 is a correct address (channel 1 of PCF8574) ++ Creative: 0x23 is a correct address (channel 3 of PCF8574) ++ VisionTek: 0x23 is a correct address (channel 3 of PCF8574) ++ */ ++ static const int pcf_probe[] = { 0x20, 0x21, 0x22, 0x23, ++ 0x24, 0x25, 0x26, 0x27, -1 }; ++ int ret1, ret2, ret3, ret4; ++ unsigned char bits = 0, flag = 0; ++ ++ memset(&c, 0, sizeof(c)); ++ c.adapter = adap; ++ for (i = 0; -1 != pcf_probe[i]; i++) { ++ c.addr = pcf_probe[i]; ++ ret1 = i2c_smbus_write_byte(&c, 0xff); ++ ret2 = i2c_smbus_read_byte(&c); ++ ret3 = i2c_smbus_write_byte(&c, 0x00); ++ ret4 = i2c_smbus_read_byte(&c); ++ ++ /* ensure that the writable bitmask works correctly */ ++ rc = 0; ++ if (ret1 != -1 && ret2 != -1 && ++ ret3 != -1 && ret4 != -1) { ++ /* in the Asus TV-Box: bit 1-0 */ ++ if (((ret2 & 0x03) == 0x03) && ++ ((ret4 & 0x03) == 0x00)) { ++ bits = (unsigned char) ~0x07; ++ flag = 0x04; ++ rc = 1; ++ } ++ /* in the Creative/VisionTek BreakOut-Box: bit 7-6 */ ++ if (((ret2 & 0xc0) == 0xc0) && ++ ((ret4 & 0xc0) == 0x00)) { ++ bits = (unsigned char) ~0xe0; ++ flag = 0x20; ++ rc = 1; ++ } ++ } ++ dprintk("probe 0x%02x @ %s: %s\n", ++ c.addr, adap->name, rc ? 
"yes" : "no"); ++ if (rc) ++ ir_attach(adap, pcf_probe[i], ++ bits|(flag<<8), 0); ++ } ++ } ++ ++ return 0; ++} ++ ++static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg) ++{ ++ /* nothing */ ++ return 0; ++} ++ ++/* ----------------------------------------------------------------------- */ ++#ifdef MODULE ++ ++int init_module(void) ++{ ++ request_module("bttv"); ++ request_module("rivatv"); ++ request_module("ivtv"); ++ request_module("cx8800"); ++ i2c_add_driver(&driver); ++ return 0; ++} ++ ++void cleanup_module(void) ++{ ++ i2c_del_driver(&driver); ++} ++ ++MODULE_DESCRIPTION("Infrared receiver driver for Hauppauge and " ++ "Pixelview cards (i2c stack)"); ++MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, " ++ "Ulrich Mueller, Stefan Jahn, Jerome Brock"); ++MODULE_LICENSE("GPL"); ++ ++module_param(minor, int, 0444); ++MODULE_PARM_DESC(minor, "Preferred minor device number"); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++#endif /* MODULE */ ++ ++/* ++ * Overrides for Emacs so that we follow Linus's tabbing style. ++ * --------------------------------------------------------------------------- ++ * Local variables: ++ * c-basic-offset: 8 ++ * End: ++ */ +diff --git a/drivers/input/lirc/lirc_igorplugusb.c b/drivers/input/lirc/lirc_igorplugusb.c +new file mode 100644 +index 0000000..ab9bdd6 +--- /dev/null ++++ b/drivers/input/lirc/lirc_igorplugusb.c +@@ -0,0 +1,619 @@ ++/* lirc_igorplugusb - USB remote support for LIRC ++ * ++ * Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware. ++ * See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm ++ * ++ * The device can only record bursts of up to 36 pulses/spaces. ++ * Works fine with RC5. Longer commands lead to device buffer overrun. ++ * (Maybe a better firmware or a microcontroller with more ram can help?) ++ * ++ * Version 0.1 [beta status] ++ * ++ * Copyright (C) 2004 Jan M. 
Hochstein ++ * ++ * ++ * This driver was derived from: ++ * Paul Miller ++ * "lirc_atiusb" module ++ * Vladimir Dergachev 's 2002 ++ * "USB ATI Remote support" (input device) ++ * Adrian Dewhurst 's 2002 ++ * "USB StreamZap remote driver" (LIRC) ++ * Artur Lipowski 's 2002 ++ * "lirc_dev" and "lirc_gpio" LIRC modules ++ * ++ */ ++ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++ ++/* module identification */ ++#define DRIVER_VERSION "0.1" ++#define DRIVER_AUTHOR \ ++ "Jan M. Hochstein " ++#define DRIVER_DESC "USB remote driver for LIRC" ++#define DRIVER_NAME "lirc_igorplugusb" ++ ++/* debugging support */ ++#ifdef CONFIG_USB_DEBUG ++static int debug = 1; ++#else ++static int debug; ++#endif ++ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG fmt, ## args); \ ++ } while (0) ++ ++/* general constants */ ++#define SUCCESS 0 ++ ++/* One mode2 pulse/space has 4 bytes. */ ++#define CODE_LENGTH sizeof(int) ++ ++/* Igor's firmware cannot record bursts longer than 36. 
*/ ++#define DEVICE_BUFLEN 36 ++ ++/** Header at the beginning of the device's buffer: ++ unsigned char data_length ++ unsigned char data_start (!=0 means ring-buffer overrun) ++ unsigned char counter (incremented by each burst) ++**/ ++#define DEVICE_HEADERLEN 3 ++ ++/* This is for the gap */ ++#define ADDITIONAL_LIRC_BYTES 2 ++ ++/* times to poll per second */ ++#define SAMPLE_RATE 100 ++static int sample_rate = SAMPLE_RATE; ++ ++ ++/**** Igor's USB Request Codes */ ++ ++#define SET_INFRABUFFER_EMPTY 1 ++/** ++ * Params: none ++ * Answer: empty ++ * ++**/ ++ ++#define GET_INFRACODE 2 ++/** ++ * Params: ++ * wValue: offset to begin reading infra buffer ++ * ++ * Answer: infra data ++ * ++**/ ++ ++#define SET_DATAPORT_DIRECTION 3 ++/** ++ * Params: ++ * wValue: (byte) 1 bit for each data port pin (0=in, 1=out) ++ * ++ * Answer: empty ++ * ++**/ ++ ++#define GET_DATAPORT_DIRECTION 4 ++/** ++ * Params: none ++ * ++ * Answer: (byte) 1 bit for each data port pin (0=in, 1=out) ++ * ++**/ ++ ++#define SET_OUT_DATAPORT 5 ++/** ++ * Params: ++ * wValue: byte to write to output data port ++ * ++ * Answer: empty ++ * ++**/ ++ ++#define GET_OUT_DATAPORT 6 ++/** ++ * Params: none ++ * ++ * Answer: least significant 3 bits read from output data port ++ * ++**/ ++ ++#define GET_IN_DATAPORT 7 ++/** ++ * Params: none ++ * ++ * Answer: least significant 3 bits read from input data port ++ * ++**/ ++ ++#define READ_EEPROM 8 ++/** ++ * Params: ++ * wValue: offset to begin reading EEPROM ++ * ++ * Answer: EEPROM bytes ++ * ++**/ ++ ++#define WRITE_EEPROM 9 ++/** ++ * Params: ++ * wValue: offset to EEPROM byte ++ * wIndex: byte to write ++ * ++ * Answer: empty ++ * ++**/ ++ ++#define SEND_RS232 10 ++/** ++ * Params: ++ * wValue: byte to send ++ * ++ * Answer: empty ++ * ++**/ ++ ++#define RECV_RS232 11 ++/** ++ * Params: none ++ * ++ * Answer: byte received ++ * ++**/ ++ ++#define SET_RS232_BAUD 12 ++/** ++ * Params: ++ * wValue: byte to write to UART bit rate register (UBRR) ++ * ++ * 
Answer: empty ++ * ++**/ ++ ++#define GET_RS232_BAUD 13 ++/** ++ * Params: none ++ * ++ * Answer: byte read from UART bit rate register (UBRR) ++ * ++**/ ++ ++ ++/* data structure for each usb remote */ ++struct irctl { ++ ++ /* usb */ ++ struct usb_device *usbdev; ++ struct urb *urb_in; ++ int devnum; ++ ++ unsigned char *buf_in; ++ unsigned int len_in; ++ int in_space; ++ struct timeval last_time; ++ ++ dma_addr_t dma_in; ++ ++ /* lirc */ ++ struct lirc_plugin *p; ++ ++ /* handle sending (init strings) */ ++ int send_flags; ++ wait_queue_head_t wait_out; ++}; ++ ++static int unregister_from_lirc(struct irctl *ir) ++{ ++ struct lirc_plugin *p = ir->p; ++ int devnum; ++ ++ if (!ir->p) ++ return -EINVAL; ++ ++ devnum = ir->devnum; ++ dprintk(DRIVER_NAME "[%d]: unregister from lirc called\n", devnum); ++ ++ lirc_unregister_plugin(p->minor); ++ ++ printk(DRIVER_NAME "[%d]: usb remote disconnected\n", devnum); ++ ++ lirc_buffer_free(p->rbuf); ++ kfree(p->rbuf); ++ kfree(p); ++ kfree(ir); ++ ir->p = NULL; ++ return SUCCESS; ++} ++ ++static int set_use_inc(void *data) ++{ ++ struct irctl *ir = data; ++ ++ if (!ir) { ++ printk(DRIVER_NAME "[?]: set_use_inc called with no context\n"); ++ return -EIO; ++ } ++ dprintk(DRIVER_NAME "[%d]: set use inc\n", ir->devnum); ++ ++ if (!ir->usbdev) ++ return -ENODEV; ++ ++ return SUCCESS; ++} ++ ++static void set_use_dec(void *data) ++{ ++ struct irctl *ir = data; ++ ++ if (!ir) { ++ printk(DRIVER_NAME "[?]: set_use_dec called with no context\n"); ++ return; ++ } ++ dprintk(DRIVER_NAME "[%d]: set use dec\n", ir->devnum); ++} ++ ++ ++/** ++ * Called in user context. ++ * return 0 if data was added to the buffer and ++ * -ENODATA if none was available. This should add some number of bits ++ * evenly divisible by code_length to the buffer ++**/ ++static int usb_remote_poll(void *data, struct lirc_buffer *buf) ++{ ++ int ret; ++ struct irctl *ir = (struct irctl *)data; ++ ++ if (!ir->usbdev) /* Has the device been removed? 
*/ ++ return -ENODEV; ++ ++ memset(ir->buf_in, 0, ir->len_in); ++ ++ ret = usb_control_msg( ++ ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), ++ GET_INFRACODE, USB_TYPE_VENDOR|USB_DIR_IN, ++ 0/* offset */, /*unused*/0, ++ ir->buf_in, ir->len_in, ++ /*timeout*/HZ * USB_CTRL_GET_TIMEOUT); ++ if (ret > 0) { ++ int i = DEVICE_HEADERLEN; ++ int code, timediff; ++ struct timeval now; ++ ++ if (ret <= 1) /* ACK packet has 1 byte --> ignore */ ++ return -ENODATA; ++ ++ dprintk(DRIVER_NAME ": Got %d bytes. Header: %02x %02x %02x\n", ++ ret, ir->buf_in[0], ir->buf_in[1], ir->buf_in[2]); ++ ++ if (ir->buf_in[2] != 0) { ++ printk(DRIVER_NAME "[%d]: Device buffer overrun.\n", ++ ir->devnum); ++ /* start at earliest byte */ ++ i = DEVICE_HEADERLEN + ir->buf_in[2]; ++ /* where are we now? space, gap or pulse? */ ++ } ++ ++ do_gettimeofday(&now); ++ timediff = now.tv_sec - ir->last_time.tv_sec; ++ if (timediff + 1 > PULSE_MASK / 1000000) ++ timediff = PULSE_MASK; ++ else { ++ timediff *= 1000000; ++ timediff += now.tv_usec - ir->last_time.tv_usec; ++ } ++ ir->last_time.tv_sec = now.tv_sec; ++ ir->last_time.tv_usec = now.tv_usec; ++ ++ /* create leading gap */ ++ code = timediff; ++ lirc_buffer_write_n(buf, (unsigned char *)&code, 1); ++ ir->in_space = 1; /* next comes a pulse */ ++ ++ /* MODE2: pulse/space (PULSE_BIT) in 1us units */ ++ ++ while (i < ret) { ++ /* 1 Igor-tick = 85.333333 us */ ++ code = (unsigned int)ir->buf_in[i] * 85 ++ + (unsigned int)ir->buf_in[i] / 3; ++ if (ir->in_space) ++ code |= PULSE_BIT; ++ lirc_buffer_write_n(buf, (unsigned char *)&code, 1); ++ /* 1 chunk = CODE_LENGTH bytes */ ++ ir->in_space ^= 1; ++ ++i; ++ } ++ ++ ret = usb_control_msg( ++ ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), ++ SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN, ++ /*unused*/0, /*unused*/0, ++ /*dummy*/ir->buf_in, /*dummy*/ir->len_in, ++ /*timeout*/HZ * USB_CTRL_GET_TIMEOUT); ++ if (ret < 0) ++ printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: " ++ "error %d\n", ir->devnum, ret); 
++ return SUCCESS; ++ } else ++ printk(DRIVER_NAME "[%d]: GET_INFRACODE: error %d\n", ++ ir->devnum, ret); ++ ++ return -ENODATA; ++} ++ ++ ++ ++static int usb_remote_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = NULL; ++ struct usb_host_interface *idesc = NULL; ++ struct usb_host_endpoint *ep_ctl2; ++ struct irctl *ir = NULL; ++ struct lirc_plugin *plugin = NULL; ++ struct lirc_buffer *rbuf = NULL; ++ int devnum, pipe, maxp, bytes_in_key; ++ int minor = 0; ++ char buf[63], name[128] = ""; ++ int mem_failure = 0; ++ int ret; ++ ++ dprintk(DRIVER_NAME ": usb probe called.\n"); ++ ++ dev = interface_to_usbdev(intf); ++ ++ idesc = intf->cur_altsetting; /* in 2.6.6 */ ++ ++ if (idesc->desc.bNumEndpoints != 1) ++ return -ENODEV; ++ ep_ctl2 = idesc->endpoint; ++ if (((ep_ctl2->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) ++ != USB_DIR_IN) ++ || (ep_ctl2->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ != USB_ENDPOINT_XFER_CONTROL) ++ return -ENODEV; ++ pipe = usb_rcvctrlpipe(dev, ep_ctl2->desc.bEndpointAddress); ++ devnum = dev->devnum; ++ maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); ++ ++ bytes_in_key = CODE_LENGTH; ++ ++ dprintk(DRIVER_NAME "[%d]: bytes_in_key=%d maxp=%d\n", ++ devnum, bytes_in_key, maxp); ++ ++ ++ /* allocate kernel memory */ ++ mem_failure = 0; ++ ir = kmalloc(sizeof(struct irctl), GFP_KERNEL); ++ if (!ir) { ++ mem_failure = 1; ++ goto mem_failure_switch; ++ } ++ ++ memset(ir, 0, sizeof(struct irctl)); ++ ++ plugin = kmalloc(sizeof(struct lirc_plugin), GFP_KERNEL); ++ if (!plugin) { ++ mem_failure = 2; ++ goto mem_failure_switch; ++ } ++ ++ rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!rbuf) { ++ mem_failure = 3; ++ goto mem_failure_switch; ++ } ++ ++ if (lirc_buffer_init(rbuf, bytes_in_key, ++ DEVICE_BUFLEN+ADDITIONAL_LIRC_BYTES)) { ++ mem_failure = 4; ++ goto mem_failure_switch; ++ } ++ ++ ir->buf_in = usb_buffer_alloc(dev, ++ DEVICE_BUFLEN+DEVICE_HEADERLEN, ++ 
GFP_ATOMIC, &ir->dma_in); ++ if (!ir->buf_in) { ++ mem_failure = 5; ++ goto mem_failure_switch; ++ } ++ ++ memset(plugin, 0, sizeof(struct lirc_plugin)); ++ ++ strcpy(plugin->name, DRIVER_NAME " "); ++ plugin->minor = -1; ++ plugin->code_length = bytes_in_key*8; /* in bits */ ++ plugin->features = LIRC_CAN_REC_MODE2; ++ plugin->data = ir; ++ plugin->rbuf = rbuf; ++ plugin->set_use_inc = &set_use_inc; ++ plugin->set_use_dec = &set_use_dec; ++ plugin->sample_rate = sample_rate; /* per second */ ++ plugin->add_to_buf = &usb_remote_poll; ++ plugin->dev = &dev->dev; ++ plugin->owner = THIS_MODULE; ++ ++ init_waitqueue_head(&ir->wait_out); ++ ++ minor = lirc_register_plugin(plugin); ++ if (minor < 0) ++ mem_failure = 9; ++ ++mem_failure_switch: ++ ++ /* free allocated memory in case of failure */ ++ switch (mem_failure) { ++ case 9: ++ usb_buffer_free(dev, DEVICE_BUFLEN+DEVICE_HEADERLEN, ++ ir->buf_in, ir->dma_in); ++ case 5: ++ lirc_buffer_free(rbuf); ++ case 4: ++ kfree(rbuf); ++ case 3: ++ kfree(plugin); ++ case 2: ++ kfree(ir); ++ case 1: ++ printk(DRIVER_NAME "[%d]: out of memory (code=%d)\n", ++ devnum, mem_failure); ++ return -ENOMEM; ++ } ++ ++ plugin->minor = minor; ++ ir->p = plugin; ++ ir->devnum = devnum; ++ ir->usbdev = dev; ++ ir->len_in = DEVICE_BUFLEN+DEVICE_HEADERLEN; ++ ir->in_space = 1; /* First mode2 event is a space. 
*/ ++ do_gettimeofday(&ir->last_time); ++ ++ if (dev->descriptor.iManufacturer ++ && usb_string(dev, dev->descriptor.iManufacturer, buf, 63) > 0) ++ strncpy(name, buf, 128); ++ if (dev->descriptor.iProduct ++ && usb_string(dev, dev->descriptor.iProduct, buf, 63) > 0) ++ snprintf(name, 128, "%s %s", name, buf); ++ printk(DRIVER_NAME "[%d]: %s on usb%d:%d\n", devnum, name, ++ dev->bus->busnum, devnum); ++ ++ /* clear device buffer */ ++ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), ++ SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN, ++ /*unused*/0, /*unused*/0, ++ /*dummy*/ir->buf_in, /*dummy*/ir->len_in, ++ /*timeout*/HZ * USB_CTRL_GET_TIMEOUT); ++ if (ret < 0) ++ printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n", ++ devnum, ret); ++ ++ usb_set_intfdata(intf, ir); ++ return SUCCESS; ++} ++ ++ ++static void usb_remote_disconnect(struct usb_interface *intf) ++{ ++ struct usb_device *dev = interface_to_usbdev(intf); ++ struct irctl *ir = usb_get_intfdata(intf); ++ usb_set_intfdata(intf, NULL); ++ ++ if (!ir || !ir->p) ++ return; ++ ++ ir->usbdev = NULL; ++ wake_up_all(&ir->wait_out); ++ ++ usb_buffer_free(dev, ir->len_in, ir->buf_in, ir->dma_in); ++ ++ unregister_from_lirc(ir); ++} ++ ++static struct usb_device_id usb_remote_id_table[] = { ++ /* Igor Plug USB (Atmel's Manufact. 
ID) */ ++ { USB_DEVICE(0x03eb, 0x0002) }, ++ ++ /* Terminating entry */ ++ { } ++}; ++ ++static struct usb_driver usb_remote_driver = { ++ .name = DRIVER_NAME, ++ .probe = usb_remote_probe, ++ .disconnect = usb_remote_disconnect, ++ .id_table = usb_remote_id_table ++}; ++ ++static int __init usb_remote_init(void) ++{ ++ int i; ++ ++ printk(KERN_INFO "\n" ++ DRIVER_NAME ": " DRIVER_DESC " v" DRIVER_VERSION "\n"); ++ printk(DRIVER_NAME ": " DRIVER_AUTHOR "\n"); ++ dprintk(DRIVER_NAME ": debug mode enabled\n"); ++ ++#ifdef MODULE ++ request_module("lirc_dev"); ++#endif ++ ++ i = usb_register(&usb_remote_driver); ++ if (i < 0) { ++ printk(DRIVER_NAME ": usb register failed, result = %d\n", i); ++ return -ENODEV; ++ } ++ ++ return SUCCESS; ++} ++ ++static void __exit usb_remote_exit(void) ++{ ++ usb_deregister(&usb_remote_driver); ++} ++ ++#ifdef MODULE ++module_init(usb_remote_init); ++module_exit(usb_remote_exit); ++ ++#include ++MODULE_INFO(vermagic, VERMAGIC_STRING); ++ ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_LICENSE("GPL"); ++MODULE_DEVICE_TABLE(usb, usb_remote_id_table); ++ ++module_param(sample_rate, int, 0644); ++MODULE_PARM_DESC(sample_rate, "Sampling rate in Hz (default: 100)"); ++ ++#else /* not MODULE */ ++subsys_initcall(usb_remote_driver); ++ ++#endif /* MODULE */ ++ ++/* ++ * Overrides for Emacs so that we follow Linus's tabbing style. 
++ * --------------------------------------------------------------------------- ++ * Local variables: ++ * c-basic-offset: 8 ++ * End: ++ */ +diff --git a/drivers/input/lirc/lirc_imon.c b/drivers/input/lirc/lirc_imon.c +new file mode 100644 +index 0000000..b1714d2 +--- /dev/null ++++ b/drivers/input/lirc/lirc_imon.c +@@ -0,0 +1,1338 @@ ++/* ++ * lirc_imon.c: LIRC plugin/VFD driver for Ahanix/Soundgraph IMON IR/VFD ++ * ++ * Copyright(C) 2004 Venky Raju(dev@venky.ws) ++ * ++ * lirc_imon is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ * ++ */ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++ ++#define MOD_AUTHOR "Venky Raju " ++#define MOD_DESC "Driver for Soundgraph iMON MultiMedia IR/VFD" ++#define MOD_NAME "lirc_imon" ++#define MOD_VERSION "0.4" ++ ++#define VFD_MINOR_BASE 144 /* Same as LCD */ ++#define DEVICE_NAME "lcd%d" ++ ++#define BUF_CHUNK_SIZE 4 ++#define BUF_SIZE 128 ++ ++#define BIT_DURATION 250 /* each bit received is 250us */ ++ ++#define SUCCESS 0 ++#define TRUE 1 ++#define FALSE 0 ++ ++ ++/* ------------------------------------------------------------ ++ * P R O T O T Y P E S ++ * ------------------------------------------------------------ ++ */ ++ ++/* USB Callback prototypes */ ++static int imon_probe(struct usb_interface *interface, ++ const struct usb_device_id *id); ++static void imon_disconnect(struct usb_interface *interface); ++static void usb_rx_callback(struct urb *urb); ++static void usb_tx_callback(struct urb *urb); ++ ++/* VFD file_operations function prototypes */ ++static int vfd_open(struct inode *inode, struct file *file); ++static int vfd_close(struct inode *inode, struct file *file); ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos); ++ ++/* LCD file_operations override function prototypes */ ++static ssize_t lcd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos); ++ ++/* LIRC plugin function prototypes */ ++static int ir_open(void *data); ++static void ir_close(void *data); ++ ++/* Driver init/exit prototypes */ ++static int __init imon_init(void); ++static void __exit imon_exit(void); ++ ++/* ------------------------------------------------------------ ++ * G L O B A L S ++ * ------------------------------------------------------------ ++ */ ++ ++struct imon_context { ++ struct usb_device *dev; ++ int vfd_supported; /* not all controllers do */ ++ int vfd_isopen; /* VFD port has been 
opened */ ++ int ir_isopen; /* IR port open */ ++ int ir_isassociating; /* IR port open for association */ ++ int dev_present; /* USB device presence */ ++ struct mutex lock; /* to lock this object */ ++ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ ++ ++ int vfd_proto_6p; /* VFD requires 6th packet */ ++ int ir_onboard_decode; /* IR signals decoded onboard */ ++ ++ struct lirc_plugin *plugin; ++ struct usb_endpoint_descriptor *rx_endpoint; ++ struct usb_endpoint_descriptor *tx_endpoint; ++ struct urb *rx_urb; ++ struct urb *tx_urb; ++ int tx_control; ++ unsigned char usb_rx_buf[8]; ++ unsigned char usb_tx_buf[8]; ++ ++ struct rx_data { ++ int count; /* length of 0 or 1 sequence */ ++ int prev_bit; /* logic level of sequence */ ++ int initial_space; /* initial space flag */ ++ } rx; ++ ++ struct tx_t { ++ unsigned char data_buf[35]; /* user data buffer */ ++ struct completion finished; /* wait for write to finish */ ++ atomic_t busy; /* write in progress */ ++ int status; /* status of tx completion */ ++ } tx; ++}; ++ ++#define LOCK_CONTEXT mutex_lock(&context->lock) ++#define UNLOCK_CONTEXT mutex_unlock(&context->lock) ++ ++/* VFD file operations */ ++static struct file_operations vfd_fops = { ++ .owner = THIS_MODULE, ++ .open = &vfd_open, ++ .write = &vfd_write, ++ .release = &vfd_close ++}; ++ ++enum { ++ IMON_DISPLAY_TYPE_AUTO, ++ IMON_DISPLAY_TYPE_VFD, ++ IMON_DISPLAY_TYPE_LCD, ++ IMON_DISPLAY_TYPE_NONE, ++}; ++ ++/* USB Device ID for IMON USB Control Board */ ++static struct usb_device_id imon_usb_id_table[] = { ++ /* IMON USB Control Board (IR & VFD) */ ++ { USB_DEVICE(0x0aa8, 0xffda) }, ++ /* IMON USB Control Board (IR only) */ ++ { USB_DEVICE(0x0aa8, 0x8001) }, ++ /* IMON USB Control Board (IR & VFD) */ ++ { USB_DEVICE(0x15c2, 0xffda) }, ++ /* IMON USB Control Board (IR only) */ ++ { USB_DEVICE(0x15c2, 0xffdc) }, ++ /* IMON USB Control Board (IR & LCD) */ ++ { USB_DEVICE(0x15c2, 0x0034) }, ++ /* IMON USB Control Board (IR & LCD) */ ++ { 
USB_DEVICE(0x15c2, 0x0036) }, ++ /* IMON USB Control Board (IR & LCD) */ ++ { USB_DEVICE(0x15c2, 0x0038) }, ++ /* IMON USB Control Board (ext IR only) */ ++ { USB_DEVICE(0x04e8, 0xff30) }, ++ {} ++}; ++ ++/* Some iMON VFD models requires a 6th packet */ ++static struct usb_device_id vfd_proto_6p_list[] = { ++ { USB_DEVICE(0x15c2, 0xffda) }, ++ { USB_DEVICE(0x15c2, 0xffdc) }, ++ { USB_DEVICE(0x15c2, 0x0034) }, ++ { USB_DEVICE(0x15c2, 0x0036) }, ++ { USB_DEVICE(0x15c2, 0x0038) }, ++ {} ++}; ++static unsigned char vfd_packet6[] = { ++ 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; ++ ++/* iMON LCD models use control endpoints and different write op */ ++static struct usb_device_id lcd_device_list[] = { ++ { USB_DEVICE(0x15c2, 0x0034) }, ++ { USB_DEVICE(0x15c2, 0x0036) }, ++ { USB_DEVICE(0x15c2, 0x0038) }, ++ {} ++}; ++ ++/* Newer iMON models decode the signal onboard */ ++static struct usb_device_id ir_onboard_decode_list[] = { ++ { USB_DEVICE(0x15c2, 0xffdc) }, ++ { USB_DEVICE(0x15c2, 0x0034) }, ++ { USB_DEVICE(0x15c2, 0x0036) }, ++ { USB_DEVICE(0x15c2, 0x0038) }, ++ {} ++}; ++ ++/* Some iMon devices have no lcd/vfd */ ++static struct usb_device_id ir_only_list[] = { ++ { USB_DEVICE(0x0aa8, 0x8001) }, ++ /* ++ * Nb: this device ID might actually be used by multiple devices, some ++ * with a display, some without. iMon Knob has this ID, is w/o. ++ */ ++ { USB_DEVICE(0x15c2, 0xffdc) }, ++ {} ++}; ++ ++/* USB Device data */ ++static struct usb_driver imon_driver = { ++ .name = MOD_NAME, ++ .probe = imon_probe, ++ .disconnect = imon_disconnect, ++ .id_table = imon_usb_id_table, ++}; ++ ++static struct usb_class_driver imon_class = { ++ .name = DEVICE_NAME, ++ .fops = &vfd_fops, ++ .minor_base = VFD_MINOR_BASE, ++}; ++ ++/* to prevent races between open() and disconnect() */ ++static DECLARE_MUTEX(disconnect_sem); ++ ++static int debug; ++ ++/* lcd, vfd or none? should be auto-detected, but can be overridden... 
*/ ++static int display_type; ++ ++ ++/* ------------------------------------------------------------ ++ * M O D U L E C O D E ++ * ------------------------------------------------------------ ++ */ ++ ++MODULE_AUTHOR(MOD_AUTHOR); ++MODULE_DESCRIPTION(MOD_DESC); ++MODULE_LICENSE("GPL"); ++MODULE_DEVICE_TABLE(usb, imon_usb_id_table); ++module_param(debug, int, 0); ++MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)"); ++module_param(display_type, int, 0); ++MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, " ++ "1=vfd, 2=lcd, 3=none (default: autodetect)"); ++ ++static inline void delete_context(struct imon_context *context) ++{ ++ if (context->vfd_supported) ++ usb_free_urb(context->tx_urb); ++ usb_free_urb(context->rx_urb); ++ lirc_buffer_free(context->plugin->rbuf); ++ kfree(context->plugin->rbuf); ++ kfree(context->plugin); ++ kfree(context); ++ ++ if (debug) ++ info("%s: context deleted", __func__); ++} ++ ++static inline void deregister_from_lirc(struct imon_context *context) ++{ ++ int retval; ++ int minor = context->plugin->minor; ++ ++ retval = lirc_unregister_plugin(minor); ++ if (retval) ++ err("%s: unable to deregister from lirc(%d)", ++ __func__, retval); ++ else ++ info("Deregistered iMON plugin(minor:%d)", minor); ++ ++} ++ ++/** ++ * Called when the VFD device(e.g. /dev/usb/lcd) ++ * is opened by the application. 
++ */ ++static int vfd_open(struct inode *inode, struct file *file) ++{ ++ struct usb_interface *interface; ++ struct imon_context *context = NULL; ++ int subminor; ++ int retval = SUCCESS; ++ ++ /* prevent races with disconnect */ ++ down(&disconnect_sem); ++ ++ subminor = iminor(inode); ++ interface = usb_find_interface(&imon_driver, subminor); ++ if (!interface) { ++ err("%s: could not find interface for minor %d", ++ __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ context = usb_get_intfdata(interface); ++ ++ if (!context) { ++ err("%s: no context found for minor %d", ++ __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ LOCK_CONTEXT; ++ ++ if (!context->vfd_supported) { ++ err("%s: VFD not supported by device", __func__); ++ retval = -ENODEV; ++ } else if (context->vfd_isopen) { ++ err("%s: VFD port is already open", __func__); ++ retval = -EBUSY; ++ } else { ++ context->vfd_isopen = TRUE; ++ file->private_data = context; ++ info("VFD port opened"); ++ } ++ ++ UNLOCK_CONTEXT; ++ ++exit: ++ up(&disconnect_sem); ++ return retval; ++} ++ ++/** ++ * Called when the VFD device(e.g. /dev/usb/lcd) ++ * is closed by the application. ++ */ ++static int vfd_close(struct inode *inode, struct file *file) ++{ ++ struct imon_context *context = NULL; ++ int retval = SUCCESS; ++ ++ context = (struct imon_context *) file->private_data; ++ ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ LOCK_CONTEXT; ++ ++ if (!context->vfd_supported) { ++ err("%s: VFD not supported by device", __func__); ++ retval = -ENODEV; ++ } else if (!context->vfd_isopen) { ++ err("%s: VFD is not open", __func__); ++ retval = -EIO; ++ } else { ++ context->vfd_isopen = FALSE; ++ info("VFD port closed"); ++ if (!context->dev_present && !context->ir_isopen) { ++ /* Device disconnected before close and IR port is not ++ * open. If IR port is open, context will be deleted by ++ * ir_close. 
*/ ++ UNLOCK_CONTEXT; ++ delete_context(context); ++ return retval; ++ } ++ } ++ ++ UNLOCK_CONTEXT; ++ return retval; ++} ++ ++/** ++ * Sends a packet to the VFD. ++ */ ++static inline int send_packet(struct imon_context *context) ++{ ++ unsigned int pipe; ++ int interval = 0; ++ int retval = SUCCESS; ++ struct usb_ctrlrequest *control_req = NULL; ++ ++ /* Check if we need to use control or interrupt urb */ ++ if (!context->tx_control) { ++ pipe = usb_sndintpipe(context->dev, ++ context->tx_endpoint->bEndpointAddress); ++ interval = context->tx_endpoint->bInterval; ++ ++ usb_fill_int_urb(context->tx_urb, context->dev, pipe, ++ context->usb_tx_buf, ++ sizeof(context->usb_tx_buf), ++ usb_tx_callback, context, interval); ++ ++ context->tx_urb->actual_length = 0; ++ } else { ++ /* fill request into kmalloc'ed space: */ ++ control_req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); ++ if (control_req == NULL) ++ return -ENOMEM; ++ ++ /* setup packet is '21 09 0200 0001 0008' */ ++ control_req->bRequestType = 0x21; ++ control_req->bRequest = 0x09; ++ control_req->wValue = cpu_to_le16(0x0200); ++ control_req->wIndex = cpu_to_le16(0x0001); ++ control_req->wLength = cpu_to_le16(0x0008); ++ ++ /* control pipe is endpoint 0x00 */ ++ pipe = usb_sndctrlpipe(context->dev, 0); ++ ++ /* build the control urb */ ++ usb_fill_control_urb(context->tx_urb, context->dev, pipe, ++ (unsigned char *)control_req, ++ context->usb_tx_buf, ++ sizeof(context->usb_tx_buf), ++ usb_tx_callback, context); ++ context->tx_urb->actual_length = 0; ++ } ++ ++ init_completion(&context->tx.finished); ++ atomic_set(&(context->tx.busy), 1); ++ ++ retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); ++ if (retval != SUCCESS) { ++ atomic_set(&(context->tx.busy), 0); ++ err("%s: error submitting urb(%d)", __func__, retval); ++ } else { ++ /* Wait for tranmission to complete(or abort) */ ++ UNLOCK_CONTEXT; ++ wait_for_completion(&context->tx.finished); ++ LOCK_CONTEXT; ++ ++ retval = context->tx.status; 
++ if (retval != SUCCESS) ++ err("%s: packet tx failed(%d)", __func__, retval); ++ } ++ ++ kfree(control_req); ++ ++ return retval; ++} ++ ++/** ++ * Sends an associate packet to the iMON 2.4G. ++ * ++ * This might not be such a good idea, since it has an id ++ * collition with some versions of the "IR & VFD" combo. ++ * The only way to determine if it is a RF version is to look ++ * at the product description string.(Which we currently do ++ * not fetch). ++ */ ++static inline int send_associate_24g(struct imon_context *context) ++{ ++ int retval; ++ const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x20 }; ++ ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ LOCK_CONTEXT; ++ ++ if (!context->dev_present) { ++ err("%s: no iMON device present", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ memcpy(context->usb_tx_buf, packet, sizeof(packet)); ++ retval = send_packet(context); ++ ++exit: ++ UNLOCK_CONTEXT; ++ ++ return retval; ++} ++ ++/** ++ * This is the sysfs functions to handle the association og the iMON 2.4G LT. 
++ * ++ * ++ */ ++ ++static ssize_t show_associate_remote(struct device *d, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct imon_context *context = dev_get_drvdata(d); ++ ++ if (!context) ++ return -ENODEV; ++ ++ if (context->ir_isassociating) { ++ strcpy(buf, "The device it associating press some button " ++ "on the remote.\n"); ++ } else if (context->ir_isopen) { ++ strcpy(buf, "Device is open and ready to associate.\n" ++ "Echo something into this file to start " ++ "the process.\n"); ++ } else { ++ strcpy(buf, "Device is closed, you need to open it to " ++ "associate the remote(you can use irw).\n"); ++ } ++ return strlen(buf); ++} ++ ++static ssize_t store_associate_remote(struct device *d, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct imon_context *context; ++ ++ context = dev_get_drvdata(d); ++ ++ if (!context) ++ return -ENODEV; ++ ++ if (!context->ir_isopen) ++ return -EINVAL; ++ ++ if (context->ir_isopen) { ++ context->ir_isassociating = TRUE; ++ send_associate_24g(context); ++ } ++ ++ return count; ++} ++ ++static DEVICE_ATTR(associate_remote, S_IWUSR | S_IRUGO, show_associate_remote, ++ store_associate_remote); ++ ++static struct attribute *imon_sysfs_entries[] = { ++ &dev_attr_associate_remote.attr, ++ NULL ++}; ++ ++static struct attribute_group imon_attribute_group = { ++ .attrs = imon_sysfs_entries ++}; ++ ++ ++ ++ ++/** ++ * Writes data to the VFD. The IMON VFD is 2x16 characters ++ * and requires data in 5 consecutive USB interrupt packets, ++ * each packet but the last carrying 7 bytes. ++ * ++ * I don't know if the VFD board supports features such as ++ * scrolling, clearing rows, blanking, etc. so at ++ * the caller must provide a full screen of data. If fewer ++ * than 32 bytes are provided spaces will be appended to ++ * generate a full screen. 
++ */ ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos) ++{ ++ int i; ++ int offset; ++ int seq; ++ int retval = SUCCESS; ++ struct imon_context *context; ++ ++ context = (struct imon_context *) file->private_data; ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ LOCK_CONTEXT; ++ ++ if (!context->dev_present) { ++ err("%s: no iMON device present", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ if (n_bytes <= 0 || n_bytes > 32) { ++ err("%s: invalid payload size", __func__); ++ retval = -EINVAL; ++ goto exit; ++ } ++ ++ if (copy_from_user(context->tx.data_buf, buf, n_bytes)) { ++ retval = -EFAULT; ++ goto exit; ++ } ++ ++ /* Pad with spaces */ ++ for (i = n_bytes; i < 32; ++i) ++ context->tx.data_buf[i] = ' '; ++ ++ for (i = 32; i < 35; ++i) ++ context->tx.data_buf[i] = 0xFF; ++ ++ offset = 0; ++ seq = 0; ++ ++ do { ++ memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7); ++ context->usb_tx_buf[7] = (unsigned char) seq; ++ ++ retval = send_packet(context); ++ if (retval != SUCCESS) { ++ err("%s: send packet failed for packet #%d", ++ __func__, seq/2); ++ goto exit; ++ } else { ++ seq += 2; ++ offset += 7; ++ } ++ ++ } while (offset < 35); ++ ++ if (context->vfd_proto_6p) { ++ /* Send packet #6 */ ++ memcpy(context->usb_tx_buf, vfd_packet6, 7); ++ context->usb_tx_buf[7] = (unsigned char) seq; ++ retval = send_packet(context); ++ if (retval != SUCCESS) ++ err("%s: send packet failed for packet #%d", ++ __func__, seq/2); ++ } ++ ++exit: ++ UNLOCK_CONTEXT; ++ ++ return (retval == SUCCESS) ? n_bytes : retval; ++} ++ ++/** ++ * Writes data to the LCD. The iMON OEM LCD screen excepts 8-byte ++ * packets. We accept data as 16 hexadecimal digits, followed by a ++ * newline (to make it easy to drive the device from a command-line ++ * -- even though the actual binary data is a bit complicated). ++ * ++ * The device itself is not a "traditional" text-mode display. 
It's ++ * actually a 16x96 pixel bitmap display. That means if you want to ++ * display text, you've got to have your own "font" and translate the ++ * text into bitmaps for display. This is really flexible (you can ++ * display whatever diacritics you need, and so on), but it's also ++ * a lot more complicated than most LCDs... ++ */ ++static ssize_t lcd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos) ++{ ++ int retval = SUCCESS; ++ struct imon_context *context; ++ ++ context = (struct imon_context *) file->private_data; ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ LOCK_CONTEXT; ++ ++ if (!context->dev_present) { ++ err("%s: no iMON device present", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ if (n_bytes != 8) { ++ err("%s: invalid payload size: %d (expecting 8)", ++ __func__, (int) n_bytes); ++ retval = -EINVAL; ++ goto exit; ++ } ++ ++ if (copy_from_user(context->usb_tx_buf, buf, 8)) { ++ retval = -EFAULT; ++ goto exit; ++ } ++ ++ retval = send_packet(context); ++ if (retval != SUCCESS) { ++ err("%s: send packet failed!", __func__); ++ goto exit; ++ } else if (debug) { ++ info("%s: write %d bytes to LCD", __func__, (int) n_bytes); ++ } ++exit: ++ UNLOCK_CONTEXT; ++ return (retval == SUCCESS) ? 
n_bytes : retval; ++} ++ ++/** ++ * Callback function for USB core API: transmit data ++ */ ++static void usb_tx_callback(struct urb *urb) ++{ ++ struct imon_context *context; ++ ++ if (!urb) ++ return; ++ context = (struct imon_context *) urb->context; ++ if (!context) ++ return; ++ ++ context->tx.status = urb->status; ++ ++ /* notify waiters that write has finished */ ++ atomic_set(&context->tx.busy, 0); ++ complete(&context->tx.finished); ++ ++ return; ++} ++ ++/** ++ * Called by lirc_dev when the application opens /dev/lirc ++ */ ++static int ir_open(void *data) ++{ ++ int retval = SUCCESS; ++ struct imon_context *context; ++ ++ /* prevent races with disconnect */ ++ down(&disconnect_sem); ++ ++ context = (struct imon_context *) data; ++ ++ LOCK_CONTEXT; ++ ++ if (context->ir_isopen) { ++ err("%s: IR port is already open", __func__); ++ retval = -EBUSY; ++ goto exit; ++ } ++ ++ /* initial IR protocol decode variables */ ++ context->rx.count = 0; ++ context->rx.initial_space = 1; ++ context->rx.prev_bit = 0; ++ ++ usb_fill_int_urb(context->rx_urb, context->dev, ++ usb_rcvintpipe(context->dev, ++ context->rx_endpoint->bEndpointAddress), ++ context->usb_rx_buf, sizeof(context->usb_rx_buf), ++ usb_rx_callback, context, context->rx_endpoint->bInterval); ++ ++ retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); ++ ++ if (retval) ++ err("%s: usb_submit_urb failed for ir_open(%d)", ++ __func__, retval); ++ else { ++ context->ir_isopen = TRUE; ++ info("IR port opened"); ++ } ++ ++exit: ++ UNLOCK_CONTEXT; ++ ++ up(&disconnect_sem); ++ return SUCCESS; ++} ++ ++/** ++ * Called by lirc_dev when the application closes /dev/lirc ++ */ ++static void ir_close(void *data) ++{ ++ struct imon_context *context; ++ ++ context = (struct imon_context *)data; ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return; ++ } ++ ++ LOCK_CONTEXT; ++ ++ usb_kill_urb(context->rx_urb); ++ context->ir_isopen = FALSE; ++ context->ir_isassociating = FALSE; ++ info("IR port 
closed"); ++ ++ if (!context->dev_present) { ++ /* Device disconnected while IR port was ++ * still open. Plugin was not deregistered ++ * at disconnect time, so do it now. */ ++ deregister_from_lirc(context); ++ ++ if (!context->vfd_isopen) { ++ UNLOCK_CONTEXT; ++ delete_context(context); ++ return; ++ } ++ /* If VFD port is open, context will be deleted by vfd_close */ ++ } ++ ++ UNLOCK_CONTEXT; ++ return; ++} ++ ++/** ++ * Convert bit count to time duration(in us) and submit ++ * the value to lirc_dev. ++ */ ++static inline void submit_data(struct imon_context *context) ++{ ++ unsigned char buf[4]; ++ int value = context->rx.count; ++ int i; ++ ++ if (debug) ++ info("submitting data to LIRC\n"); ++ ++ value *= BIT_DURATION; ++ value &= PULSE_MASK; ++ if (context->rx.prev_bit) ++ value |= PULSE_BIT; ++ ++ for (i = 0; i < 4; ++i) ++ buf[i] = value>>(i*8); ++ ++ lirc_buffer_write_1(context->plugin->rbuf, buf); ++ wake_up(&context->plugin->rbuf->wait_poll); ++ return; ++} ++ ++/** ++ * Process the incoming packet ++ */ ++static inline void incoming_packet(struct imon_context *context, ++ struct urb *urb) ++{ ++ int len = urb->actual_length; ++ unsigned char *buf = urb->transfer_buffer; ++ int octet, bit; ++ unsigned char mask; ++ int chunk_num; ++#ifdef DEBUG ++ int i; ++#endif ++ ++ /* ++ * we need to add some special handling for ++ * the imon's IR mouse events ++ */ ++ if ((len == 5) && (buf[0] == 0x01) && (buf[4] == 0x00)) { ++ /* first, pad to 8 bytes so it conforms with everything else */ ++ buf[5] = buf[6] = buf[7] = 0; ++ len = 8; ++ ++ /* ++ * the imon directional pad functions more like a touchpad. ++ * Bytes 3 & 4 contain a position coordinate (x,y), with each ++ * component ranging from -14 to 14. Since this doesn't ++ * cooperate well with the way lirc works (it would appear to ++ * lirc as more than 100 different buttons) we need to map it ++ * to 4 discrete values. 
Also, when you get too close to ++ * diagonals, it has a tendancy to jump back and forth, so lets ++ * try to ignore when they get too close ++ */ ++ if ((buf[1] == 0) && ((buf[2] != 0) || (buf[3] != 0))) { ++ int y = (int)(char)buf[2]; ++ int x = (int)(char)buf[3]; ++ if (abs(abs(x) - abs(y)) < 3) { ++ return; ++ } else if (abs(y) > abs(x)) { ++ buf[2] = 0x00; ++ buf[3] = (y > 0) ? 0x7f : 0x80; ++ } else { ++ buf[3] = 0x00; ++ buf[2] = (x > 0) ? 0x7f : 0x80; ++ } ++ } ++ } ++ ++ if (len != 8) { ++ warn("%s: invalid incoming packet size(%d)", ++ __func__, len); ++ return; ++ } ++ ++ /* iMON 2.4G associate frame */ ++ if (buf[0] == 0x00 && ++ buf[2] == 0xFF && /* REFID */ ++ buf[3] == 0xFF && ++ buf[4] == 0xFF && ++ buf[5] == 0xFF && /* iMON 2.4G */ ++ ((buf[6] == 0x4E && buf[7] == 0xDF) || /* LT */ ++ (buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */ ++ warn("%s: remote associated refid=%02X", __func__, buf[1]); ++ context->ir_isassociating = FALSE; ++ } ++ ++ chunk_num = buf[7]; ++ ++ if (chunk_num == 0xFF) ++ return; /* filler frame, no data here */ ++ ++ if (buf[0] == 0xFF && ++ buf[1] == 0xFF && ++ buf[2] == 0xFF && ++ buf[3] == 0xFF && ++ buf[4] == 0xFF && ++ buf[5] == 0xFF && /* iMON 2.4G */ ++ ((buf[6] == 0x4E && buf[7] == 0xAF) || /* LT */ ++ (buf[6] == 0x5E && buf[7] == 0xAF))) /* DT */ ++ return; /* filler frame, no data here */ ++ ++#ifdef DEBUG ++ for (i = 0; i < 8; ++i) ++ printk(KERN_INFO "%02x ", buf[i]); ++ printk(KERN_INFO "\n"); ++#endif ++ ++ if (context->ir_onboard_decode) { ++ /* The signals have been decoded onboard the iMON controller */ ++ lirc_buffer_write_1(context->plugin->rbuf, buf); ++ wake_up(&context->plugin->rbuf->wait_poll); ++ return; ++ } ++ ++ /* Translate received data to pulse and space lengths. ++ * Received data is active low, i.e. pulses are 0 and ++ * spaces are 1. 
++ * ++ * My original algorithm was essentially similar to ++ * Changwoo Ryu's with the exception that he switched ++ * the incoming bits to active high and also fed an ++ * initial space to LIRC at the start of a new sequence ++ * if the previous bit was a pulse. ++ * ++ * I've decided to adopt his algorithm. */ ++ ++ if (chunk_num == 1 && context->rx.initial_space) { ++ /* LIRC requires a leading space */ ++ context->rx.prev_bit = 0; ++ context->rx.count = 4; ++ submit_data(context); ++ context->rx.count = 0; ++ } ++ ++ for (octet = 0; octet < 5; ++octet) { ++ mask = 0x80; ++ for (bit = 0; bit < 8; ++bit) { ++ int curr_bit = !(buf[octet] & mask); ++ if (curr_bit != context->rx.prev_bit) { ++ if (context->rx.count) { ++ submit_data(context); ++ context->rx.count = 0; ++ } ++ context->rx.prev_bit = curr_bit; ++ } ++ ++context->rx.count; ++ mask >>= 1; ++ } ++ } ++ ++ if (chunk_num == 10) { ++ if (context->rx.count) { ++ submit_data(context); ++ context->rx.count = 0; ++ } ++ context->rx.initial_space = context->rx.prev_bit; ++ } ++} ++ ++/** ++ * Callback function for USB core API: receive data ++ */ ++static void usb_rx_callback(struct urb *urb) ++{ ++ struct imon_context *context; ++ ++ if (!urb) ++ return; ++ context = (struct imon_context *) urb->context; ++ if (!context) ++ return; ++ ++ switch (urb->status) { ++ case -ENOENT: /* usbcore unlink successful! 
*/ ++ return; ++ case SUCCESS: ++ if (context->ir_isopen) ++ incoming_packet(context, urb); ++ break; ++ default: ++ warn("%s: status(%d): ignored", __func__, urb->status); ++ break; ++ } ++ ++ usb_submit_urb(context->rx_urb, GFP_ATOMIC); ++ return; ++} ++ ++ ++ ++/** ++ * Callback function for USB core API: Probe ++ */ ++static int imon_probe(struct usb_interface *interface, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = NULL; ++ struct usb_host_interface *iface_desc = NULL; ++ struct usb_endpoint_descriptor *rx_endpoint = NULL; ++ struct usb_endpoint_descriptor *tx_endpoint = NULL; ++ struct urb *rx_urb = NULL; ++ struct urb *tx_urb = NULL; ++ struct lirc_plugin *plugin = NULL; ++ struct lirc_buffer *rbuf = NULL; ++ int lirc_minor = 0; ++ int num_endpoints; ++ int retval = SUCCESS; ++ int vfd_ep_found; ++ int ir_ep_found; ++ int alloc_status; ++ int vfd_proto_6p = FALSE; ++ int ir_onboard_decode = FALSE; ++ int tx_control = FALSE; ++ int is_lcd = 0; ++ struct imon_context *context = NULL; ++ int i; ++ ++ info("%s: found IMON device", __func__); ++ ++ /* ++ * If it's the LCD, as opposed to the VFD, we just need to replace ++ * the "write" file op. 
++ */ ++ if ((display_type == IMON_DISPLAY_TYPE_AUTO && ++ usb_match_id(interface, lcd_device_list)) || ++ display_type == IMON_DISPLAY_TYPE_LCD) { ++ vfd_fops.write = &lcd_write; ++ is_lcd = 1; ++ } ++ ++ dev = usb_get_dev(interface_to_usbdev(interface)); ++ iface_desc = interface->cur_altsetting; ++ num_endpoints = iface_desc->desc.bNumEndpoints; ++ ++ /* ++ * Scan the endpoint list and set: ++ * first input endpoint = IR endpoint ++ * first output endpoint = VFD endpoint ++ */ ++ ++ ir_ep_found = FALSE; ++ vfd_ep_found = FALSE; ++ ++ for (i = 0; i < num_endpoints && !(ir_ep_found && vfd_ep_found); ++i) { ++ struct usb_endpoint_descriptor *ep; ++ int ep_dir; ++ int ep_type; ++ ep = &iface_desc->endpoint[i].desc; ++ ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK; ++ ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; ++ ++ if (!ir_ep_found && ++ ep_dir == USB_DIR_IN && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ ++ rx_endpoint = ep; ++ ir_ep_found = TRUE; ++ if (debug) ++ info("%s: found IR endpoint", __func__); ++ ++ } else if (!vfd_ep_found && ++ ep_dir == USB_DIR_OUT && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ tx_endpoint = ep; ++ vfd_ep_found = TRUE; ++ if (debug) ++ info("%s: found VFD endpoint", __func__); ++ } ++ } ++ ++ /* ++ * If we didn't find a vfd endpoint, and we have a next-gen LCD, ++ * use control urb instead of interrupt ++ */ ++ if (!vfd_ep_found) { ++ if (is_lcd) { ++ tx_control = 1; ++ vfd_ep_found = TRUE; ++ if (debug) ++ info("%s: LCD device uses control endpoint, " ++ "not interface OUT endpoint", __func__); ++ } ++ } ++ ++ /* ++ * Some iMon receivers have no display. Unfortunately, it seems ++ * that SoundGraph recycles device IDs between devices both with ++ * and without... 
:\ ++ */ ++ if ((display_type == IMON_DISPLAY_TYPE_AUTO && ++ usb_match_id(interface, ir_only_list)) || ++ display_type == IMON_DISPLAY_TYPE_NONE) { ++ tx_control = 0; ++ vfd_ep_found = FALSE; ++ if (debug) ++ info("%s: device has no display", __func__); ++ } ++ ++ /* Input endpoint is mandatory */ ++ if (!ir_ep_found) { ++ err("%s: no valid input(IR) endpoint found.", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } else { ++ /* Determine if the IR signals are decoded onboard */ ++ if (usb_match_id(interface, ir_onboard_decode_list)) ++ ir_onboard_decode = TRUE; ++ ++ if (debug) ++ info("ir_onboard_decode: %d", ir_onboard_decode); ++ } ++ ++ /* Determine if VFD requires 6 packets */ ++ if (vfd_ep_found) { ++ if (usb_match_id(interface, vfd_proto_6p_list)) ++ vfd_proto_6p = TRUE; ++ ++ if (debug) ++ info("vfd_proto_6p: %d", vfd_proto_6p); ++ } ++ ++ ++ /* Allocate memory */ ++ ++ alloc_status = SUCCESS; ++ ++ context = kmalloc(sizeof(struct imon_context), GFP_KERNEL); ++ if (!context) { ++ err("%s: kmalloc failed for context", __func__); ++ alloc_status = 1; ++ goto alloc_status_switch; ++ } ++ plugin = kmalloc(sizeof(struct lirc_plugin), GFP_KERNEL); ++ if (!plugin) { ++ err("%s: kmalloc failed for lirc_plugin", __func__); ++ alloc_status = 2; ++ goto alloc_status_switch; ++ } ++ rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!rbuf) { ++ err("%s: kmalloc failed for lirc_buffer", __func__); ++ alloc_status = 3; ++ goto alloc_status_switch; ++ } ++ if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) { ++ err("%s: lirc_buffer_init failed", __func__); ++ alloc_status = 4; ++ goto alloc_status_switch; ++ } ++ rx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!rx_urb) { ++ err("%s: usb_alloc_urb failed for IR urb", __func__); ++ alloc_status = 5; ++ goto alloc_status_switch; ++ } ++ if (vfd_ep_found) { ++ tx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!tx_urb) { ++ err("%s: usb_alloc_urb failed for VFD urb", ++ __func__); ++ alloc_status = 6; ++ goto 
alloc_status_switch; ++ } ++ } ++ ++ /* clear all members of imon_context and lirc_plugin */ ++ memset(context, 0, sizeof(struct imon_context)); ++ mutex_init(&context->lock); ++ context->vfd_proto_6p = vfd_proto_6p; ++ context->ir_onboard_decode = ir_onboard_decode; ++ ++ memset(plugin, 0, sizeof(struct lirc_plugin)); ++ ++ strcpy(plugin->name, MOD_NAME); ++ plugin->minor = -1; ++ plugin->code_length = (ir_onboard_decode) ? ++ 32 : sizeof(int) * 8; ++ plugin->sample_rate = 0; ++ plugin->features = (ir_onboard_decode) ? ++ LIRC_CAN_REC_LIRCCODE : LIRC_CAN_REC_MODE2; ++ plugin->data = context; ++ plugin->rbuf = rbuf; ++ plugin->set_use_inc = ir_open; ++ plugin->set_use_dec = ir_close; ++ plugin->dev = &dev->dev; ++ plugin->owner = THIS_MODULE; ++ ++ LOCK_CONTEXT; ++ ++ lirc_minor = lirc_register_plugin(plugin); ++ if (lirc_minor < 0) { ++ err("%s: lirc_register_plugin failed", __func__); ++ alloc_status = 7; ++ UNLOCK_CONTEXT; ++ goto alloc_status_switch; ++ } else ++ info("%s: Registered iMON plugin(minor:%d)", ++ __func__, lirc_minor); ++ ++ /* Needed while unregistering! 
*/ ++ plugin->minor = lirc_minor; ++ ++ context->dev = dev; ++ context->dev_present = TRUE; ++ context->rx_endpoint = rx_endpoint; ++ context->rx_urb = rx_urb; ++ if (vfd_ep_found) { ++ context->vfd_supported = TRUE; ++ context->tx_endpoint = tx_endpoint; ++ context->tx_urb = tx_urb; ++ context->tx_control = tx_control; ++ } ++ context->plugin = plugin; ++ ++ usb_set_intfdata(interface, context); ++ ++ if (cpu_to_le16(dev->descriptor.idProduct) == 0xffdc) { ++ int err; ++ ++ err = sysfs_create_group(&interface->dev.kobj, ++ &imon_attribute_group); ++ if (err) ++ err("%s: Could not create sysfs entries(%d)", ++ __func__, err); ++ } ++ ++ if (vfd_ep_found) { ++ if (debug) ++ info("Registering VFD with sysfs"); ++ if (usb_register_dev(interface, &imon_class)) { ++ /* Not a fatal error, so ignore */ ++ info("%s: could not get a minor number for VFD", ++ __func__); ++ } ++ } ++ ++ info("%s: iMON device on usb<%d:%d> initialized", ++ __func__, dev->bus->busnum, dev->devnum); ++ ++ UNLOCK_CONTEXT; ++ ++alloc_status_switch: ++ ++ switch (alloc_status) { ++ case 7: ++ if (vfd_ep_found) ++ usb_free_urb(tx_urb); ++ case 6: ++ usb_free_urb(rx_urb); ++ case 5: ++ lirc_buffer_free(rbuf); ++ case 4: ++ kfree(rbuf); ++ case 3: ++ kfree(plugin); ++ case 2: ++ kfree(context); ++ context = NULL; ++ case 1: ++ retval = -ENOMEM; ++ case SUCCESS: ++ ; ++ } ++ ++exit: ++ return retval; ++} ++ ++/** ++ * Callback function for USB core API: disonnect ++ */ ++static void imon_disconnect(struct usb_interface *interface) ++{ ++ struct imon_context *context; ++ ++ /* prevent races with ir_open()/vfd_open() */ ++ down(&disconnect_sem); ++ ++ context = usb_get_intfdata(interface); ++ LOCK_CONTEXT; ++ ++ info("%s: iMON device disconnected", __func__); ++ ++ /* sysfs_remove_group is safe to call even if sysfs_create_group ++ * hasn't been called */ ++ sysfs_remove_group(&interface->dev.kobj, ++ &imon_attribute_group); ++ usb_set_intfdata(interface, NULL); ++ context->dev_present = FALSE; ++ ++ /* 
Stop reception */ ++ usb_kill_urb(context->rx_urb); ++ ++ /* Abort ongoing write */ ++ if (atomic_read(&context->tx.busy)) { ++ usb_kill_urb(context->tx_urb); ++ wait_for_completion(&context->tx.finished); ++ } ++ ++ /* De-register from lirc_dev if IR port is not open */ ++ if (!context->ir_isopen) ++ deregister_from_lirc(context); ++ ++ if (context->vfd_supported) ++ usb_deregister_dev(interface, &imon_class); ++ ++ UNLOCK_CONTEXT; ++ ++ if (!context->ir_isopen && !context->vfd_isopen) ++ delete_context(context); ++ ++ up(&disconnect_sem); ++} ++ ++static int __init imon_init(void) ++{ ++ int rc; ++ ++ info(MOD_DESC ", v" MOD_VERSION); ++ info(MOD_AUTHOR); ++ ++ rc = usb_register(&imon_driver); ++ if (rc) { ++ err("%s: usb register failed(%d)", __func__, rc); ++ return -ENODEV; ++ } ++ return SUCCESS; ++} ++ ++static void __exit imon_exit(void) ++{ ++ usb_deregister(&imon_driver); ++ info("module removed. Goodbye!"); ++} ++ ++#ifdef MODULE ++module_init(imon_init); ++module_exit(imon_exit); ++ ++#else ++subsys_initcall(imon_init); ++ ++#endif /* MODULE */ +diff --git a/drivers/input/lirc/lirc_it87.c b/drivers/input/lirc/lirc_it87.c +new file mode 100644 +index 0000000..0a64847 +--- /dev/null ++++ b/drivers/input/lirc/lirc_it87.c +@@ -0,0 +1,999 @@ ++/* ++ * LIRC driver for ITE IT8712/IT8705 CIR port ++ * ++ * Copyright (C) 2001 Hans-Gunter Lutke Uphues ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 ++ * USA ++ * ++ * ITE IT8705 and IT8712(not tested) CIR-port support for lirc based ++ * via cut and paste from lirc_sir.c (C) 2000 Milan Pikula ++ * ++ * Attention: Sendmode only tested with debugging logs ++ * ++ * 2001/02/27 Christoph Bartelmus : ++ * reimplemented read function ++ * 2005/06/05 Andrew Calkin implemented support for Asus Digimatrix, ++ * based on work of the following member of the Outertrack Digimatrix ++ * Forum: Art103 ++ */ ++ ++ ++#include ++#include ++#include ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++#include "lirc_it87.h" ++ ++#ifdef LIRC_IT87_DIGIMATRIX ++static int digimatrix = 1; ++static int it87_freq = 36; /* kHz */ ++static int irq = 9; ++#else ++static int digimatrix; ++static int it87_freq = 38; /* kHz */ ++static int irq = IT87_CIR_DEFAULT_IRQ; ++#endif ++ ++static unsigned long it87_bits_in_byte_out; ++static unsigned long it87_send_counter; ++static unsigned char it87_RXEN_mask = IT87_CIR_RCR_RXEN; ++ ++#define RBUF_LEN 1024 ++#define WBUF_LEN 1024 ++ ++#define LIRC_DRIVER_NAME "lirc_it87" ++ ++/* timeout for sequences in jiffies (=5/100s) */ ++/* must be longer than TIME_CONST */ ++#define IT87_TIMEOUT (HZ*5/100) ++ ++/* insmod parameters */ ++static int debug; ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \ ++ fmt, ## args); \ ++ } while (0) ++ ++static int io = IT87_CIR_DEFAULT_IOBASE; ++/* receiver demodulator default: off */ ++static int it87_enable_demodulator; ++ ++static int timer_enabled; ++static DEFINE_SPINLOCK(timer_lock); ++static struct timer_list timerlist; ++/* time of last signal change detected */ ++static struct timeval last_tv = {0, 0}; ++/* time of last UART data ready interrupt */ ++static struct timeval last_intr_tv = {0, 0}; ++static int last_value; ++ ++static DECLARE_WAIT_QUEUE_HEAD(lirc_read_queue); ++ ++static DEFINE_SPINLOCK(hardware_lock); ++static DEFINE_SPINLOCK(dev_lock); ++ ++static int rx_buf[RBUF_LEN]; ++unsigned int rx_tail, rx_head; ++static int tx_buf[WBUF_LEN]; ++ ++/* SECTION: Prototypes */ ++ ++/* Communication with user-space */ ++static int lirc_open(struct inode *inode, struct file *file); ++static int lirc_close(struct inode *inode, struct file *file); ++static unsigned int lirc_poll(struct file *file, poll_table *wait); ++static ssize_t lirc_read(struct file *file, char *buf, ++ size_t count, loff_t *ppos); ++static ssize_t lirc_write(struct file *file, const char *buf, ++ size_t n, loff_t *pos); ++static int lirc_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg); ++static void add_read_queue(int flag, unsigned long val); ++#ifdef MODULE ++static int init_chrdev(void); ++static void drop_chrdev(void); ++#endif ++ /* Hardware */ ++static irqreturn_t it87_interrupt(int irq, void *dev_id); ++static void send_space(unsigned long len); ++static void send_pulse(unsigned long len); ++static void init_send(void); ++static void terminate_send(unsigned long len); ++static int init_hardware(void); ++static void drop_hardware(void); ++ /* Initialisation */ ++static int init_port(void); ++static void drop_port(void); ++ ++ ++/* SECTION: Communication with user-space */ ++ ++static int lirc_open(struct inode *inode, struct file 
*file) ++{ ++ spin_lock(&dev_lock); ++ if (module_refcount(THIS_MODULE)) { ++ spin_unlock(&dev_lock); ++ return -EBUSY; ++ } ++ spin_unlock(&dev_lock); ++ return 0; ++} ++ ++ ++static int lirc_close(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++ ++static unsigned int lirc_poll(struct file *file, poll_table *wait) ++{ ++ poll_wait(file, &lirc_read_queue, wait); ++ if (rx_head != rx_tail) ++ return POLLIN | POLLRDNORM; ++ return 0; ++} ++ ++ ++static ssize_t lirc_read(struct file *file, char *buf, ++ size_t count, loff_t *ppos) ++{ ++ int n = 0; ++ int retval = 0; ++ ++ while (n < count) { ++ if (file->f_flags & O_NONBLOCK && rx_head == rx_tail) { ++ retval = -EAGAIN; ++ break; ++ } ++ retval = wait_event_interruptible(lirc_read_queue, ++ rx_head != rx_tail); ++ if (retval) ++ break; ++ ++ if (copy_to_user((void *) buf + n, (void *) (rx_buf + rx_head), ++ sizeof(int))) { ++ retval = -EFAULT; ++ break; ++ } ++ rx_head = (rx_head + 1) & (RBUF_LEN - 1); ++ n += sizeof(int); ++ } ++ if (n) ++ return n; ++ return retval; ++} ++ ++ ++static ssize_t lirc_write(struct file *file, const char *buf, ++ size_t n, loff_t *pos) ++{ ++ int i = 0; ++ ++ if (n % sizeof(int) || (n / sizeof(int)) > WBUF_LEN) ++ return -EINVAL; ++ if (copy_from_user(tx_buf, buf, n)) ++ return -EFAULT; ++ n /= sizeof(int); ++ init_send(); ++ while (1) { ++ if (i >= n) ++ break; ++ if (tx_buf[i]) ++ send_pulse(tx_buf[i]); ++ i++; ++ if (i >= n) ++ break; ++ if (tx_buf[i]) ++ send_space(tx_buf[i]); ++ i++; ++ } ++ terminate_send(tx_buf[i - 1]); ++ return n; ++} ++ ++ ++static int lirc_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg) ++{ ++ int retval = 0; ++ unsigned long value = 0; ++ unsigned int ivalue; ++ unsigned long hw_flags; ++ ++ if (cmd == LIRC_GET_FEATURES) ++ value = LIRC_CAN_SEND_PULSE | ++ LIRC_CAN_SET_SEND_CARRIER | ++ LIRC_CAN_REC_MODE2; ++ else if (cmd == LIRC_GET_SEND_MODE) ++ value = LIRC_MODE_PULSE; ++ else if (cmd == 
LIRC_GET_REC_MODE) ++ value = LIRC_MODE_MODE2; ++ ++ switch (cmd) { ++ case LIRC_GET_FEATURES: ++ case LIRC_GET_SEND_MODE: ++ case LIRC_GET_REC_MODE: ++ retval = put_user(value, (unsigned long *) arg); ++ break; ++ ++ case LIRC_SET_SEND_MODE: ++ case LIRC_SET_REC_MODE: ++ retval = get_user(value, (unsigned long *) arg); ++ break; ++ ++ case LIRC_SET_SEND_CARRIER: ++ retval = get_user(ivalue, (unsigned int *) arg); ++ if (retval) ++ return retval; ++ ivalue /= 1000; ++ if (ivalue > IT87_CIR_FREQ_MAX || ++ ivalue < IT87_CIR_FREQ_MIN) ++ return -EINVAL; ++ ++ it87_freq = ivalue; ++ ++ spin_lock_irqsave(&hardware_lock, hw_flags); ++ outb(((inb(io + IT87_CIR_TCR2) & IT87_CIR_TCR2_TXMPW) | ++ (it87_freq - IT87_CIR_FREQ_MIN) << 3), ++ io + IT87_CIR_TCR2); ++ spin_unlock_irqrestore(&hardware_lock, hw_flags); ++ dprintk("demodulation frequency: %d kHz\n", it87_freq); ++ ++ break; ++ ++ default: ++ retval = -ENOIOCTLCMD; ++ } ++ ++ if (retval) ++ return retval; ++ ++ if (cmd == LIRC_SET_REC_MODE) { ++ if (value != LIRC_MODE_MODE2) ++ retval = -ENOSYS; ++ } else if (cmd == LIRC_SET_SEND_MODE) { ++ if (value != LIRC_MODE_PULSE) ++ retval = -ENOSYS; ++ } ++ return retval; ++} ++ ++static void add_read_queue(int flag, unsigned long val) ++{ ++ unsigned int new_rx_tail; ++ int newval; ++ ++ dprintk("add flag %d with val %lu\n", flag, val); ++ ++ newval = val & PULSE_MASK; ++ ++ /* statistically pulses are ~TIME_CONST/2 too long: we could ++ maybe make this more exactly but this is good enough */ ++ if (flag) { ++ /* pulse */ ++ if (newval > TIME_CONST / 2) ++ newval -= TIME_CONST / 2; ++ else /* should not ever happen */ ++ newval = 1; ++ newval |= PULSE_BIT; ++ } else ++ newval += TIME_CONST / 2; ++ new_rx_tail = (rx_tail + 1) & (RBUF_LEN - 1); ++ if (new_rx_tail == rx_head) { ++ dprintk("Buffer overrun.\n"); ++ return; ++ } ++ rx_buf[rx_tail] = newval; ++ rx_tail = new_rx_tail; ++ wake_up_interruptible(&lirc_read_queue); ++} ++ ++ ++static struct file_operations lirc_fops = { 
++ .read = lirc_read, ++ .write = lirc_write, ++ .poll = lirc_poll, ++ .ioctl = lirc_ioctl, ++ .open = lirc_open, ++ .release = lirc_close, ++}; ++ ++static int set_use_inc(void *data) ++{ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++} ++ ++static struct lirc_plugin plugin = { ++ .name = LIRC_DRIVER_NAME, ++ .minor = -1, ++ .code_length = 1, ++ .sample_rate = 0, ++ .data = NULL, ++ .add_to_buf = NULL, ++ .get_queue = NULL, ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .fops = &lirc_fops, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++ ++#ifdef MODULE ++static int init_chrdev(void) ++{ ++ plugin.minor = lirc_register_plugin(&plugin); ++ ++ if (plugin.minor < 0) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++ ++static void drop_chrdev(void) ++{ ++ lirc_unregister_plugin(plugin.minor); ++} ++#endif ++ ++ ++/* SECTION: Hardware */ ++static long delta(struct timeval *tv1, struct timeval *tv2) ++{ ++ unsigned long deltv; ++ ++ deltv = tv2->tv_sec - tv1->tv_sec; ++ if (deltv > 15) ++ deltv = 0xFFFFFF; ++ else ++ deltv = deltv*1000000 + tv2->tv_usec - tv1->tv_usec; ++ return deltv; ++} ++ ++static void it87_timeout(unsigned long data) ++{ ++ unsigned long flags; ++ ++ /* avoid interference with interrupt */ ++ spin_lock_irqsave(&timer_lock, flags); ++ ++ if (digimatrix) { ++ /* We have timed out. ++ Disable the RX mechanism. ++ */ ++ ++ outb((inb(io + IT87_CIR_RCR) & ~IT87_CIR_RCR_RXEN) | ++ IT87_CIR_RCR_RXACT, io + IT87_CIR_RCR); ++ if (it87_RXEN_mask) ++ outb(inb(io + IT87_CIR_RCR) | IT87_CIR_RCR_RXEN, ++ io + IT87_CIR_RCR); ++ dprintk(" TIMEOUT\n"); ++ timer_enabled = 0; ++ ++ /* fifo clear */ ++ outb(inb(io + IT87_CIR_TCR1) | IT87_CIR_TCR1_FIFOCLR, ++ io+IT87_CIR_TCR1); ++ ++ } else { ++ /* if last received signal was a pulse, but receiving ++ stopped within the 9 bit frame, we need to finish ++ this pulse and simulate a signal change to from ++ pulse to space. 
Otherwise upper layers will receive ++ two sequences next time. */ ++ ++ if (last_value) { ++ unsigned long pulse_end; ++ ++ /* determine 'virtual' pulse end: */ ++ pulse_end = delta(&last_tv, &last_intr_tv); ++ dprintk("timeout add %d for %lu usec\n", ++ last_value, pulse_end); ++ add_read_queue(last_value, pulse_end); ++ last_value = 0; ++ last_tv = last_intr_tv; ++ } ++ } ++ spin_unlock_irqrestore(&timer_lock, flags); ++} ++ ++static irqreturn_t it87_interrupt(int irq, void *dev_id) ++{ ++ unsigned char data; ++ struct timeval curr_tv; ++ static unsigned long deltv; ++ unsigned long deltintrtv; ++ unsigned long flags, hw_flags; ++ int iir, lsr; ++ int fifo = 0; ++ static char lastbit; ++ char bit; ++ ++ /* Bit duration in microseconds */ ++ const unsigned long bit_duration = 1000000ul / ++ (115200 / IT87_CIR_BAUDRATE_DIVISOR); ++ ++ ++ iir = inb(io + IT87_CIR_IIR); ++ ++ switch (iir & IT87_CIR_IIR_IID) { ++ case 0x4: ++ case 0x6: ++ lsr = inb(io + IT87_CIR_RSR) & (IT87_CIR_RSR_RXFTO | ++ IT87_CIR_RSR_RXFBC); ++ fifo = lsr & IT87_CIR_RSR_RXFBC; ++ dprintk("iir: 0x%x fifo: 0x%x\n", iir, lsr); ++ ++ /* avoid interference with timer */ ++ spin_lock_irqsave(&timer_lock, flags); ++ spin_lock_irqsave(&hardware_lock, hw_flags); ++ if (digimatrix) { ++ static unsigned long acc_pulse; ++ static unsigned long acc_space; ++ ++ do { ++ data = inb(io + IT87_CIR_DR); ++ data = ~data; ++ fifo--; ++ if (data != 0x00) { ++ if (timer_enabled) ++ del_timer(&timerlist); ++ /* start timer for end of ++ * sequence detection */ ++ timerlist.expires = jiffies + ++ IT87_TIMEOUT; ++ add_timer(&timerlist); ++ timer_enabled = 1; ++ } ++ /* Loop through */ ++ for (bit = 0; bit < 8; ++bit) { ++ if ((data >> bit) & 1) { ++ ++acc_pulse; ++ if (lastbit == 0) { ++ add_read_queue(0, ++ acc_space * ++ bit_duration); ++ acc_space = 0; ++ } ++ } else { ++ ++acc_space; ++ if (lastbit == 1) { ++ add_read_queue(1, ++ acc_pulse * ++ bit_duration); ++ acc_pulse = 0; ++ } ++ } ++ lastbit = (data >> bit) & 
1; ++ } ++ ++ } while (fifo != 0); ++ } else { /* Normal Operation */ ++ do { ++ del_timer(&timerlist); ++ data = inb(io + IT87_CIR_DR); ++ ++ dprintk("data=%.2x\n", data); ++ do_gettimeofday(&curr_tv); ++ deltv = delta(&last_tv, &curr_tv); ++ deltintrtv = delta(&last_intr_tv, &curr_tv); ++ ++ dprintk("t %lu , d %d\n", ++ deltintrtv, (int)data); ++ ++ /* if nothing came in last 2 cycles, ++ it was gap */ ++ if (deltintrtv > TIME_CONST * 2) { ++ if (last_value) { ++ dprintk("GAP\n"); ++ ++ /* simulate signal change */ ++ add_read_queue(last_value, ++ deltv - ++ deltintrtv); ++ last_value = 0; ++ last_tv.tv_sec = ++ last_intr_tv.tv_sec; ++ last_tv.tv_usec = ++ last_intr_tv.tv_usec; ++ deltv = deltintrtv; ++ } ++ } ++ data = 1; ++ if (data ^ last_value) { ++ /* deltintrtv > 2*TIME_CONST, ++ remember ? */ ++ /* the other case is timeout */ ++ add_read_queue(last_value, ++ deltv-TIME_CONST); ++ last_value = data; ++ last_tv = curr_tv; ++ if (last_tv.tv_usec >= TIME_CONST) ++ last_tv.tv_usec -= TIME_CONST; ++ else { ++ last_tv.tv_sec--; ++ last_tv.tv_usec += 1000000 - ++ TIME_CONST; ++ } ++ } ++ last_intr_tv = curr_tv; ++ if (data) { ++ /* start timer for end of ++ * sequence detection */ ++ timerlist.expires = ++ jiffies + IT87_TIMEOUT; ++ add_timer(&timerlist); ++ } ++ outb((inb(io + IT87_CIR_RCR) & ++ ~IT87_CIR_RCR_RXEN) | ++ IT87_CIR_RCR_RXACT, ++ io + IT87_CIR_RCR); ++ if (it87_RXEN_mask) ++ outb(inb(io + IT87_CIR_RCR) | ++ IT87_CIR_RCR_RXEN, ++ io + IT87_CIR_RCR); ++ fifo--; ++ } while (fifo != 0); ++ } ++ spin_unlock_irqrestore(&hardware_lock, hw_flags); ++ spin_unlock_irqrestore(&timer_lock, flags); ++ ++ return IRQ_RETVAL(IRQ_HANDLED); ++ ++ default: ++ /* not our irq */ ++ dprintk("unknown IRQ (shouldn't happen) !!\n"); ++ return IRQ_RETVAL(IRQ_NONE); ++ } ++} ++ ++ ++static void send_it87(unsigned long len, unsigned long stime, ++ unsigned char send_byte, unsigned int count_bits) ++{ ++ long count = len / stime; ++ long time_left = 0; ++ static unsigned char 
byte_out; ++ unsigned long hw_flags; ++ ++ dprintk("%s: len=%ld, sb=%d\n", __func__, len, send_byte); ++ ++ time_left = (long)len - (long)count * (long)stime; ++ count += ((2 * time_left) / stime); ++ while (count) { ++ long i = 0; ++ for (i = 0; i < count_bits; i++) { ++ byte_out = (byte_out << 1) | (send_byte & 1); ++ it87_bits_in_byte_out++; ++ } ++ if (it87_bits_in_byte_out == 8) { ++ dprintk("out=0x%x, tsr_txfbc: 0x%x\n", ++ byte_out, ++ inb(io + IT87_CIR_TSR) & ++ IT87_CIR_TSR_TXFBC); ++ ++ while ((inb(io + IT87_CIR_TSR) & ++ IT87_CIR_TSR_TXFBC) >= IT87_CIR_FIFO_SIZE) ++ ; ++ ++ spin_lock_irqsave(&hardware_lock, hw_flags); ++ outb(byte_out, io + IT87_CIR_DR); ++ spin_unlock_irqrestore(&hardware_lock, hw_flags); ++ ++ it87_bits_in_byte_out = 0; ++ it87_send_counter++; ++ byte_out = 0; ++ } ++ count--; ++ } ++} ++ ++ ++/* ++maybe: exchange space and pulse because ++it8705 only modulates 0-bits ++*/ ++ ++ ++static void send_space(unsigned long len) ++{ ++ send_it87(len, TIME_CONST, IT87_CIR_SPACE, IT87_CIR_BAUDRATE_DIVISOR); ++} ++ ++static void send_pulse(unsigned long len) ++{ ++ send_it87(len, TIME_CONST, IT87_CIR_PULSE, IT87_CIR_BAUDRATE_DIVISOR); ++} ++ ++ ++static void init_send() ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ /* RXEN=0: receiver disable */ ++ it87_RXEN_mask = 0; ++ outb(inb(io + IT87_CIR_RCR) & ~IT87_CIR_RCR_RXEN, ++ io + IT87_CIR_RCR); ++ spin_unlock_irqrestore(&hardware_lock, flags); ++ it87_bits_in_byte_out = 0; ++ it87_send_counter = 0; ++} ++ ++ ++static void terminate_send(unsigned long len) ++{ ++ unsigned long flags; ++ unsigned long last = 0; ++ ++ last = it87_send_counter; ++ /* make sure all necessary data has been sent */ ++ while (last == it87_send_counter) ++ send_space(len); ++ /* wait until all data sent */ ++ while ((inb(io + IT87_CIR_TSR) & IT87_CIR_TSR_TXFBC) != 0) ++ ; ++ /* then reenable receiver */ ++ spin_lock_irqsave(&hardware_lock, flags); ++ it87_RXEN_mask = IT87_CIR_RCR_RXEN; ++ 
outb(inb(io + IT87_CIR_RCR) | IT87_CIR_RCR_RXEN, ++ io + IT87_CIR_RCR); ++ spin_unlock_irqrestore(&hardware_lock, flags); ++} ++ ++ ++static int init_hardware(void) ++{ ++ unsigned long flags; ++ unsigned char it87_rcr = 0; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ /* init cir-port */ ++ /* enable r/w-access to Baudrate-Register */ ++ outb(IT87_CIR_IER_BR, io + IT87_CIR_IER); ++ outb(IT87_CIR_BAUDRATE_DIVISOR % 0x100, io+IT87_CIR_BDLR); ++ outb(IT87_CIR_BAUDRATE_DIVISOR / 0x100, io+IT87_CIR_BDHR); ++ /* Baudrate Register off, define IRQs: Input only */ ++ if (digimatrix) { ++ outb(IT87_CIR_IER_IEC | IT87_CIR_IER_RFOIE, io + IT87_CIR_IER); ++ /* RX: HCFS=0, RXDCR = 001b (33,75..38,25 kHz), RXEN=1 */ ++ } else { ++ outb(IT87_CIR_IER_IEC | IT87_CIR_IER_RDAIE, io + IT87_CIR_IER); ++ /* RX: HCFS=0, RXDCR = 001b (35,6..40,3 kHz), RXEN=1 */ ++ } ++ it87_rcr = (IT87_CIR_RCR_RXEN & it87_RXEN_mask) | 0x1; ++ if (it87_enable_demodulator) ++ it87_rcr |= IT87_CIR_RCR_RXEND; ++ outb(it87_rcr, io + IT87_CIR_RCR); ++ if (digimatrix) { ++ /* Set FIFO depth to 1 byte, and disable TX */ ++ outb(inb(io + IT87_CIR_TCR1) | 0x00, ++ io + IT87_CIR_TCR1); ++ ++ /* TX: it87_freq (36kHz), ++ 'reserved' sensitivity setting (0x00) */ ++ outb(((it87_freq - IT87_CIR_FREQ_MIN) << 3) | 0x00, ++ io + IT87_CIR_TCR2); ++ } else { ++ /* TX: 38kHz, 13,3us (pulse-width */ ++ outb(((it87_freq - IT87_CIR_FREQ_MIN) << 3) | 0x06, ++ io + IT87_CIR_TCR2); ++ } ++ spin_unlock_irqrestore(&hardware_lock, flags); ++ return 0; ++} ++ ++ ++static void drop_hardware(void) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ disable_irq(irq); ++ /* receiver disable */ ++ it87_RXEN_mask = 0; ++ outb(0x1, io + IT87_CIR_RCR); ++ /* turn off irqs */ ++ outb(0, io + IT87_CIR_IER); ++ /* fifo clear */ ++ outb(IT87_CIR_TCR1_FIFOCLR, io+IT87_CIR_TCR1); ++ /* reset */ ++ outb(IT87_CIR_IER_RESET, io+IT87_CIR_IER); ++ enable_irq(irq); ++ spin_unlock_irqrestore(&hardware_lock, flags); ++} ++ ++ 
++static unsigned char it87_read(unsigned char port) ++{ ++ outb(port, IT87_ADRPORT); ++ return inb(IT87_DATAPORT); ++} ++ ++ ++static void it87_write(unsigned char port, unsigned char data) ++{ ++ outb(port, IT87_ADRPORT); ++ outb(data, IT87_DATAPORT); ++} ++ ++ ++/* SECTION: Initialisation */ ++ ++static int init_port(void) ++{ ++ unsigned long hw_flags; ++ int retval = 0; ++ ++ unsigned char init_bytes[4] = IT87_INIT; ++ unsigned char it87_chipid = 0; ++ unsigned char ldn = 0; ++ unsigned int it87_io = 0; ++ unsigned int it87_irq = 0; ++ ++ /* Enter MB PnP Mode */ ++ outb(init_bytes[0], IT87_ADRPORT); ++ outb(init_bytes[1], IT87_ADRPORT); ++ outb(init_bytes[2], IT87_ADRPORT); ++ outb(init_bytes[3], IT87_ADRPORT); ++ ++ /* 8712 or 8705 ? */ ++ it87_chipid = it87_read(IT87_CHIP_ID1); ++ if (it87_chipid != 0x87) { ++ retval = -ENXIO; ++ return retval; ++ } ++ it87_chipid = it87_read(IT87_CHIP_ID2); ++ if ((it87_chipid != 0x12) && (it87_chipid != 0x05)) { ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": no IT8705/12 found, exiting..\n"); ++ retval = -ENXIO; ++ return retval; ++ } ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": found IT87%.2x.\n", ++ it87_chipid); ++ ++ /* get I/O-Port and IRQ */ ++ if (it87_chipid == 0x12) ++ ldn = IT8712_CIR_LDN; ++ else ++ ldn = IT8705_CIR_LDN; ++ it87_write(IT87_LDN, ldn); ++ ++ it87_io = it87_read(IT87_CIR_BASE_MSB) * 256 + ++ it87_read(IT87_CIR_BASE_LSB); ++ if (it87_io == 0) { ++ if (io == 0) ++ io = IT87_CIR_DEFAULT_IOBASE; ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": set default io 0x%x\n", ++ io); ++ it87_write(IT87_CIR_BASE_MSB, io / 0x100); ++ it87_write(IT87_CIR_BASE_LSB, io % 0x100); ++ } else ++ io = it87_io; ++ ++ it87_irq = it87_read(IT87_CIR_IRQ); ++ if (digimatrix || it87_irq == 0) { ++ if (irq == 0) ++ irq = IT87_CIR_DEFAULT_IRQ; ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": set default irq 0x%x\n", ++ irq); ++ it87_write(IT87_CIR_IRQ, irq); ++ } else ++ irq = it87_irq; ++ ++ spin_lock_irqsave(&hardware_lock, hw_flags); ++ /* 
reset */ ++ outb(IT87_CIR_IER_RESET, io+IT87_CIR_IER); ++ /* fifo clear */ ++ outb(IT87_CIR_TCR1_FIFOCLR | ++ /* IT87_CIR_TCR1_ILE | */ ++ IT87_CIR_TCR1_TXRLE | ++ IT87_CIR_TCR1_TXENDF, io+IT87_CIR_TCR1); ++ spin_unlock_irqrestore(&hardware_lock, hw_flags); ++ ++ /* get I/O port access and IRQ line */ ++ if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": i/o port 0x%.4x already in use.\n", io); ++ /* Leaving MB PnP Mode */ ++ it87_write(IT87_CFGCTRL, 0x2); ++ return -EBUSY; ++ } ++ ++ /* activate CIR-Device */ ++ it87_write(IT87_CIR_ACT, 0x1); ++ ++ /* Leaving MB PnP Mode */ ++ it87_write(IT87_CFGCTRL, 0x2); ++ ++ retval = request_irq(irq, it87_interrupt, 0 /*IRQF_DISABLED*/, ++ LIRC_DRIVER_NAME, NULL); ++ if (retval < 0) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": IRQ %d already in use.\n", ++ irq); ++ release_region(io, 8); ++ return retval; ++ } ++ ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": I/O port 0x%.4x, IRQ %d.\n", io, irq); ++ ++ init_timer(&timerlist); ++ timerlist.function = it87_timeout; ++ timerlist.data = 0xabadcafe; ++ ++ return 0; ++} ++ ++ ++static void drop_port(void) ++{ ++/* ++ unsigned char init_bytes[4] = IT87_INIT; ++ ++ / * Enter MB PnP Mode * / ++ outb(init_bytes[0], IT87_ADRPORT); ++ outb(init_bytes[1], IT87_ADRPORT); ++ outb(init_bytes[2], IT87_ADRPORT); ++ outb(init_bytes[3], IT87_ADRPORT); ++ ++ / * deactivate CIR-Device * / ++ it87_write(IT87_CIR_ACT, 0x0); ++ ++ / * Leaving MB PnP Mode * / ++ it87_write(IT87_CFGCTRL, 0x2); ++*/ ++ ++ del_timer_sync(&timerlist); ++ free_irq(irq, NULL); ++ release_region(io, 8); ++} ++ ++ ++static int init_lirc_it87(void) ++{ ++ int retval; ++ ++ init_waitqueue_head(&lirc_read_queue); ++ retval = init_port(); ++ if (retval < 0) ++ return retval; ++ init_hardware(); ++ printk(KERN_INFO LIRC_DRIVER_NAME ": Installed.\n"); ++ return 0; ++} ++ ++ ++#ifdef MODULE ++ ++static int __init lirc_it87_init(void) ++{ ++ int retval; ++ ++ retval = init_chrdev(); ++ if 
(retval < 0) ++ return retval; ++ retval = init_lirc_it87(); ++ if (retval) { ++ drop_chrdev(); ++ return retval; ++ } ++ return 0; ++} ++ ++ ++static void __exit lirc_it87_exit(void) ++{ ++ drop_hardware(); ++ drop_chrdev(); ++ drop_port(); ++ printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n"); ++} ++ ++module_init(lirc_it87_init); ++module_exit(lirc_it87_exit); ++ ++MODULE_DESCRIPTION("LIRC driver for ITE IT8712/IT8705 CIR port"); ++MODULE_AUTHOR("Hans-Gunter Lutke Uphues"); ++MODULE_LICENSE("GPL"); ++ ++module_param(io, int, 0444); ++MODULE_PARM_DESC(io, "I/O base address (default: 0x310)"); ++ ++module_param(irq, int, 0444); ++#ifdef LIRC_IT87_DIGIMATRIX ++MODULE_PARM_DESC(irq, "Interrupt (1,3-12) (default: 9)"); ++#else ++MODULE_PARM_DESC(irq, "Interrupt (1,3-12) (default: 7)"); ++#endif ++ ++module_param(it87_enable_demodulator, bool, 0444); ++MODULE_PARM_DESC(it87_enable_demodulator, ++ "Receiver demodulator enable/disable (1/0), default: 0"); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++module_param(digimatrix, bool, 0644); ++#ifdef LIRC_IT87_DIGIMATRIX ++MODULE_PARM_DESC(digimatrix, ++ "Asus Digimatrix it87 compat. enable/disable (1/0), default: 1"); ++#else ++MODULE_PARM_DESC(digimatrix, ++ "Asus Digimatrix it87 compat. enable/disable (1/0), default: 0"); ++#endif ++ ++ ++module_param(it87_freq, int, 0444); ++#ifdef LIRC_IT87_DIGIMATRIX ++MODULE_PARM_DESC(it87_freq, ++ "Carrier demodulator frequency (kHz), (default: 36)"); ++#else ++MODULE_PARM_DESC(it87_freq, ++ "Carrier demodulator frequency (kHz), (default: 38)"); ++#endif ++ ++#endif /* MODULE */ ++ ++ ++/* ++ * Overrides for Emacs so that we follow Linus's tabbing style. 
++ * --------------------------------------------------------------------------- ++ * Local variables: ++ * c-basic-offset: 8 ++ * End: ++ */ +diff --git a/drivers/input/lirc/lirc_it87.h b/drivers/input/lirc/lirc_it87.h +new file mode 100644 +index 0000000..a997204 +--- /dev/null ++++ b/drivers/input/lirc/lirc_it87.h +@@ -0,0 +1,116 @@ ++/* lirc_it87.h */ ++/* SECTION: Definitions */ ++ ++/********************************* ITE IT87xx ************************/ ++ ++/* based on the following documentation from ITE: ++ a) IT8712F Preliminary CIR Programming Guide V0.1 ++ b) IT8705F Simple LPC I/O Preliminary Specifiction V0.3 ++ c) IT8712F EC-LPC I/O Preliminary Specification V0.5 ++*/ ++ ++/* IT8712/05 Ports: */ ++#define IT87_ADRPORT 0x2e ++#define IT87_DATAPORT 0x2f ++#define IT87_INIT {0x87, 0x01, 0x55, 0x55} ++ ++/* alternate Ports: */ ++/* ++#define IT87_ADRPORT 0x4e ++#define IT87_DATAPORT 0x4f ++#define IT87_INIT {0x87, 0x01, 0x55, 0xaa} ++ */ ++ ++/* IT8712/05 Registers */ ++#define IT87_CFGCTRL 0x2 ++#define IT87_LDN 0x7 ++#define IT87_CHIP_ID1 0x20 ++#define IT87_CHIP_ID2 0x21 ++#define IT87_CFG_VERSION 0x22 ++#define IT87_SWSUSPEND 0x23 ++ ++#define IT8712_CIR_LDN 0xa ++#define IT8705_CIR_LDN 0x7 ++ ++/* CIR Configuration Registers: */ ++#define IT87_CIR_ACT 0x30 ++#define IT87_CIR_BASE_MSB 0x60 ++#define IT87_CIR_BASE_LSB 0x61 ++#define IT87_CIR_IRQ 0x70 ++#define IT87_CIR_CONFIG 0xf0 ++ ++/* List of IT87_CIR registers: offset to BaseAddr */ ++#define IT87_CIR_DR 0 ++#define IT87_CIR_IER 1 ++#define IT87_CIR_RCR 2 ++#define IT87_CIR_TCR1 3 ++#define IT87_CIR_TCR2 4 ++#define IT87_CIR_TSR 5 ++#define IT87_CIR_RSR 6 ++#define IT87_CIR_BDLR 5 ++#define IT87_CIR_BDHR 6 ++#define IT87_CIR_IIR 7 ++ ++/* Bit Definitionen */ ++/* IER: */ ++#define IT87_CIR_IER_TM_EN 0x80 ++#define IT87_CIR_IER_RESEVED 0x40 ++#define IT87_CIR_IER_RESET 0x20 ++#define IT87_CIR_IER_BR 0x10 ++#define IT87_CIR_IER_IEC 0x8 ++#define IT87_CIR_IER_RFOIE 0x4 ++#define IT87_CIR_IER_RDAIE 
0x2 ++#define IT87_CIR_IER_TLDLIE 0x1 ++ ++/* RCR: */ ++#define IT87_CIR_RCR_RDWOS 0x80 ++#define IT87_CIR_RCR_HCFS 0x40 ++#define IT87_CIR_RCR_RXEN 0x20 ++#define IT87_CIR_RCR_RXEND 0x10 ++#define IT87_CIR_RCR_RXACT 0x8 ++#define IT87_CIR_RCR_RXDCR 0x7 ++ ++/* TCR1: */ ++#define IT87_CIR_TCR1_FIFOCLR 0x80 ++#define IT87_CIR_TCR1_ILE 0x40 ++#define IT87_CIR_TCR1_FIFOTL 0x30 ++#define IT87_CIR_TCR1_TXRLE 0x8 ++#define IT87_CIR_TCR1_TXENDF 0x4 ++#define IT87_CIR_TCR1_TXMPM 0x3 ++ ++/* TCR2: */ ++#define IT87_CIR_TCR2_CFQ 0xf8 ++#define IT87_CIR_TCR2_TXMPW 0x7 ++ ++/* TSR: */ ++#define IT87_CIR_TSR_RESERVED 0xc0 ++#define IT87_CIR_TSR_TXFBC 0x3f ++ ++/* RSR: */ ++#define IT87_CIR_RSR_RXFTO 0x80 ++#define IT87_CIR_RSR_RESERVED 0x40 ++#define IT87_CIR_RSR_RXFBC 0x3f ++ ++/* IIR: */ ++#define IT87_CIR_IIR_RESERVED 0xf8 ++#define IT87_CIR_IIR_IID 0x6 ++#define IT87_CIR_IIR_IIP 0x1 ++ ++/* TM: */ ++#define IT87_CIR_TM_IL_SEL 0x80 ++#define IT87_CIR_TM_RESERVED 0x40 ++#define IT87_CIR_TM_TM_REG 0x3f ++ ++#define IT87_CIR_FIFO_SIZE 32 ++ ++/* Baudratedivisor for IT87: power of 2: only 1,2,4 or 8) */ ++#define IT87_CIR_BAUDRATE_DIVISOR 0x1 ++#define IT87_CIR_DEFAULT_IOBASE 0x310 ++#define IT87_CIR_DEFAULT_IRQ 0x7 ++#define IT87_CIR_SPACE 0x00 ++#define IT87_CIR_PULSE 0xff ++#define IT87_CIR_FREQ_MIN 27 ++#define IT87_CIR_FREQ_MAX 58 ++#define TIME_CONST (IT87_CIR_BAUDRATE_DIVISOR * 8000000ul / 115200ul) ++ ++/********************************* ITE IT87xx ************************/ +diff --git a/drivers/input/lirc/lirc_ite8709.c b/drivers/input/lirc/lirc_ite8709.c +new file mode 100644 +index 0000000..d03ecf7 +--- /dev/null ++++ b/drivers/input/lirc/lirc_ite8709.c +@@ -0,0 +1,545 @@ ++/* ++ * LIRC driver for ITE8709 CIR port ++ * ++ * Copyright (C) 2008 Grégory Lardière ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the 
++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 ++ * USA ++ */ ++ ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++#define LIRC_DRIVER_NAME "lirc_ite8709" ++ ++#define BUF_CHUNK_SIZE sizeof(int) ++#define BUF_SIZE (128*BUF_CHUNK_SIZE) ++ ++/******************************************************************************* ++* The ITE8709 device seems to be the combination of IT8512 superIO chip and * ++* a specific firmware running on the IT8512's embedded micro-controller. * ++* In addition of the embedded micro-controller, the IT8512 chip contains a * ++* CIR module and several other modules. A few modules are directly accessible * ++* by the host CPU, but most of them are only accessible by the * ++* micro-controller. The CIR module is only accessible by the micro-controller. * ++* The battery-backed SRAM module is accessible by the host CPU and the * ++* micro-controller. So one of the MC's firmware role is to act as a bridge * ++* between the host CPU and the CIR module. The firmware implements a kind of * ++* communication protocol using the SRAM module as a shared memory. The IT8512 * ++* specification is publicly available on ITE's web site, but the communication * ++* protocol is not, so it was reverse-engineered. 
* ++*******************************************************************************/ ++ ++/* ITE8709 Registers addresses and values (reverse-engineered) */ ++#define ITE8709_MODE 0x1a ++#define ITE8709_REG_ADR 0x1b ++#define ITE8709_REG_VAL 0x1c ++#define ITE8709_IIR 0x1e /* Interrupt identification register */ ++#define ITE8709_RFSR 0x1f /* Receiver FIFO status register */ ++#define ITE8709_FIFO_START 0x20 ++ ++#define ITE8709_MODE_READY 0X00 ++#define ITE8709_MODE_WRITE 0X01 ++#define ITE8709_MODE_READ 0X02 ++#define ITE8709_IIR_RDAI 0x02 /* Receiver data available interrupt */ ++#define ITE8709_IIR_RFOI 0x04 /* Receiver FIFO overrun interrupt */ ++#define ITE8709_RFSR_MASK 0x3f /* FIFO byte count mask */ ++ ++/* IT8512 CIR-module registers addresses and values (from IT8512 E/F */ ++/* specification v0.4.1) */ ++#define IT8512_REG_MSTCR 0x01 /* Master control register */ ++#define IT8512_REG_IER 0x02 /* Interrupt enable register */ ++#define IT8512_REG_CFR 0x04 /* Carrier frequency register */ ++#define IT8512_REG_RCR 0x05 /* Receive control register */ ++#define IT8512_REG_BDLR 0x08 /* Baud rate divisor low byte register */ ++#define IT8512_REG_BDHR 0x09 /* Baud rate divisor high byte register */ ++ ++#define IT8512_MSTCR_RESET 0x01 /* Reset registers to default value */ ++#define IT8512_MSTCR_FIFOCLR 0x02 /* Clear FIFO */ ++#define IT8512_MSTCR_FIFOTL_7 0x04 /* FIFO threshold level : 7 */ ++#define IT8512_MSTCR_FIFOTL_25 0x0c /* FIFO threshold level : 25 */ ++#define IT8512_IER_RDAIE 0x02 /* Enable data interrupt request */ ++#define IT8512_IER_RFOIE 0x04 /* Enable FIFO overrun interrupt req */ ++#define IT8512_IER_IEC 0x80 /* Enable interrupt request */ ++#define IT8512_CFR_CF_36KHZ 0x09 /* Carrier freq : low speed, 36kHz */ ++#define IT8512_RCR_RXDCR_1 0x01 /* Demodulation carrier range : 1 */ ++#define IT8512_RCR_RXACT 0x08 /* Receiver active */ ++#define IT8512_RCR_RXEN 0x80 /* Receiver enable */ ++#define IT8512_BDR_6 6 /* Baud rate divisor : 6 */ ++ ++/* 
Actual values used by this driver */ ++#define CFG_FIFOTL IT8512_MSTCR_FIFOTL_25 ++#define CFG_CR_FREQ IT8512_CFR_CF_36KHZ ++#define CFG_DCR IT8512_RCR_RXDCR_1 ++#define CFG_BDR IT8512_BDR_6 ++#define CFG_TIMEOUT 100000 /* Rearm interrupt when a space is > 100 ms */ ++ ++static int debug; ++ ++struct ite8709_device { ++ int use_count; ++ int io; ++ int irq; ++ spinlock_t hardware_lock; ++ unsigned long long acc_pulse; ++ unsigned long long acc_space; ++ char lastbit; ++ struct timeval last_tv; ++ struct lirc_plugin plugin; ++ struct lirc_buffer buffer; ++ struct tasklet_struct tasklet; ++ char force_rearm; ++ char rearmed; ++ char device_busy; ++}; ++ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \ ++ fmt, ## args); \ ++ } while (0) ++ ++ ++static unsigned char ite8709_read(struct ite8709_device *dev, ++ unsigned char port) ++{ ++ outb(port, dev->io); ++ return inb(dev->io+1); ++} ++ ++static void ite8709_write(struct ite8709_device *dev, unsigned char port, ++ unsigned char data) ++{ ++ outb(port, dev->io); ++ outb(data, dev->io+1); ++} ++ ++static void ite8709_wait_device(struct ite8709_device *dev) ++{ ++ int i = 0; ++ /* loop until device tells it's ready to continue */ ++ /* iterations count is usually ~750 but can sometimes achieve 13000 */ ++ for (i = 0; i < 15000; i++) { ++ udelay(2); ++ if (ite8709_read(dev, ITE8709_MODE) == ITE8709_MODE_READY) ++ break; ++ } ++} ++ ++static void ite8709_write_register(struct ite8709_device *dev, ++ unsigned char reg_adr, unsigned char reg_value) ++{ ++ ite8709_wait_device(dev); ++ ++ ite8709_write(dev, ITE8709_REG_VAL, reg_value); ++ ite8709_write(dev, ITE8709_REG_ADR, reg_adr); ++ ite8709_write(dev, ITE8709_MODE, ITE8709_MODE_WRITE); ++} ++ ++static void ite8709_init_hardware(struct ite8709_device *dev) ++{ ++ spin_lock_irq(&dev->hardware_lock); ++ dev->device_busy = 1; ++ spin_unlock_irq(&dev->hardware_lock); ++ ++ ite8709_write_register(dev, IT8512_REG_BDHR, 
(CFG_BDR >> 8) & 0xff); ++ ite8709_write_register(dev, IT8512_REG_BDLR, CFG_BDR & 0xff); ++ ite8709_write_register(dev, IT8512_REG_CFR, CFG_CR_FREQ); ++ ite8709_write_register(dev, IT8512_REG_IER, ++ IT8512_IER_IEC | IT8512_IER_RFOIE | IT8512_IER_RDAIE); ++ ite8709_write_register(dev, IT8512_REG_RCR, CFG_DCR); ++ ite8709_write_register(dev, IT8512_REG_MSTCR, ++ CFG_FIFOTL | IT8512_MSTCR_FIFOCLR); ++ ite8709_write_register(dev, IT8512_REG_RCR, ++ IT8512_RCR_RXEN | IT8512_RCR_RXACT | CFG_DCR); ++ ++ spin_lock_irq(&dev->hardware_lock); ++ dev->device_busy = 0; ++ spin_unlock_irq(&dev->hardware_lock); ++ ++ tasklet_enable(&dev->tasklet); ++} ++ ++static void ite8709_drop_hardware(struct ite8709_device *dev) ++{ ++ tasklet_disable(&dev->tasklet); ++ ++ spin_lock_irq(&dev->hardware_lock); ++ dev->device_busy = 1; ++ spin_unlock_irq(&dev->hardware_lock); ++ ++ ite8709_write_register(dev, IT8512_REG_RCR, 0); ++ ite8709_write_register(dev, IT8512_REG_MSTCR, ++ IT8512_MSTCR_RESET | IT8512_MSTCR_FIFOCLR); ++ ++ spin_lock_irq(&dev->hardware_lock); ++ dev->device_busy = 0; ++ spin_unlock_irq(&dev->hardware_lock); ++} ++ ++static int ite8709_set_use_inc(void *data) ++{ ++ struct ite8709_device *dev; ++ dev = data; ++ if (dev->use_count == 0) ++ ite8709_init_hardware(dev); ++ dev->use_count++; ++ return 0; ++} ++ ++static void ite8709_set_use_dec(void *data) ++{ ++ struct ite8709_device *dev; ++ dev = data; ++ dev->use_count--; ++ if (dev->use_count == 0) ++ ite8709_drop_hardware(dev); ++} ++ ++static void ite8709_add_read_queue(struct ite8709_device *dev, int flag, ++ unsigned long long val) ++{ ++ int value; ++ ++ dprintk("add a %llu usec %s\n", val, flag ? "pulse" : "space"); ++ ++ value = (val > PULSE_MASK) ? 
PULSE_MASK : val; ++ if (flag) ++ value |= PULSE_BIT; ++ ++ if (!lirc_buffer_full(&dev->buffer)) { ++ lirc_buffer_write_1(&dev->buffer, (void *) &value); ++ wake_up(&dev->buffer.wait_poll); ++ } ++} ++ ++static irqreturn_t ite8709_interrupt(int irq, void *dev_id) ++{ ++ unsigned char data; ++ int iir, rfsr, i; ++ int fifo = 0; ++ char bit; ++ struct timeval curr_tv; ++ ++ /* Bit duration in microseconds */ ++ const unsigned long bit_duration = 1000000ul / (115200 / CFG_BDR); ++ ++ struct ite8709_device *dev; ++ dev = dev_id; ++ ++ /* If device is busy, we simply discard data because we are in one of */ ++ /* these two cases : shutting down or rearming the device, so this */ ++ /* doesn't really matter and this avoids waiting too long in IRQ ctx */ ++ spin_lock(&dev->hardware_lock); ++ if (dev->device_busy) { ++ spin_unlock(&dev->hardware_lock); ++ return IRQ_RETVAL(IRQ_HANDLED); ++ } ++ ++ iir = ite8709_read(dev, ITE8709_IIR); ++ ++ switch (iir) { ++ case ITE8709_IIR_RFOI: ++ dprintk("fifo overrun, scheduling forced rearm just in case\n"); ++ dev->force_rearm = 1; ++ tasklet_schedule(&dev->tasklet); ++ spin_unlock(&dev->hardware_lock); ++ return IRQ_RETVAL(IRQ_HANDLED); ++ ++ case ITE8709_IIR_RDAI: ++ rfsr = ite8709_read(dev, ITE8709_RFSR); ++ fifo = rfsr & ITE8709_RFSR_MASK; ++ if (fifo > 32) ++ fifo = 32; ++ dprintk("iir: 0x%x rfsr: 0x%x fifo: %d\n", iir, rfsr, fifo); ++ ++ if (dev->rearmed) { ++ do_gettimeofday(&curr_tv); ++ dev->acc_space += 1000000ull ++ * (curr_tv.tv_sec - dev->last_tv.tv_sec) ++ + (curr_tv.tv_usec - dev->last_tv.tv_usec); ++ dev->rearmed = 0; ++ } ++ for (i = 0; i < fifo; i++) { ++ data = ite8709_read(dev, i+ITE8709_FIFO_START); ++ data = ~data; ++ /* Loop through */ ++ for (bit = 0; bit < 8; ++bit) { ++ if ((data >> bit) & 1) { ++ dev->acc_pulse += bit_duration; ++ if (dev->lastbit == 0) { ++ ite8709_add_read_queue(dev, 0, ++ dev->acc_space); ++ dev->acc_space = 0; ++ } ++ } else { ++ dev->acc_space += bit_duration; ++ if (dev->lastbit == 
1) { ++ ite8709_add_read_queue(dev, 1, ++ dev->acc_pulse); ++ dev->acc_pulse = 0; ++ } ++ } ++ dev->lastbit = (data >> bit) & 1; ++ } ++ } ++ ite8709_write(dev, ITE8709_RFSR, 0); ++ ++ if (dev->acc_space > CFG_TIMEOUT) { ++ dprintk("scheduling rearm IRQ\n"); ++ do_gettimeofday(&dev->last_tv); ++ dev->force_rearm = 0; ++ tasklet_schedule(&dev->tasklet); ++ } ++ ++ spin_unlock(&dev->hardware_lock); ++ return IRQ_RETVAL(IRQ_HANDLED); ++ ++ default: ++ /* not our irq */ ++ dprintk("unknown IRQ (shouldn't happen) !!\n"); ++ spin_unlock(&dev->hardware_lock); ++ return IRQ_RETVAL(IRQ_NONE); ++ } ++} ++ ++static void ite8709_rearm_irq(unsigned long data) ++{ ++ struct ite8709_device *dev; ++ unsigned long flags; ++ dev = (struct ite8709_device *) data; ++ ++ spin_lock_irqsave(&dev->hardware_lock, flags); ++ dev->device_busy = 1; ++ spin_unlock_irqrestore(&dev->hardware_lock, flags); ++ ++ if (dev->force_rearm || dev->acc_space > CFG_TIMEOUT) { ++ dprintk("rearming IRQ\n"); ++ ite8709_write_register(dev, IT8512_REG_RCR, ++ IT8512_RCR_RXACT | CFG_DCR); ++ ite8709_write_register(dev, IT8512_REG_MSTCR, ++ CFG_FIFOTL | IT8512_MSTCR_FIFOCLR); ++ ite8709_write_register(dev, IT8512_REG_RCR, ++ IT8512_RCR_RXEN | IT8512_RCR_RXACT | CFG_DCR); ++ if (!dev->force_rearm) ++ dev->rearmed = 1; ++ dev->force_rearm = 0; ++ } ++ ++ spin_lock_irqsave(&dev->hardware_lock, flags); ++ dev->device_busy = 0; ++ spin_unlock_irqrestore(&dev->hardware_lock, flags); ++} ++ ++static int ite8709_cleanup(struct ite8709_device *dev, int stage, int errno, ++ char *msg) ++{ ++ if (msg != NULL) ++ printk(KERN_ERR LIRC_DRIVER_NAME ": %s\n", msg); ++ ++ switch (stage) { ++ case 6: ++ if (dev->use_count > 0) ++ ite8709_drop_hardware(dev); ++ case 5: ++ free_irq(dev->irq, dev); ++ case 4: ++ release_region(dev->io, 2); ++ case 3: ++ lirc_unregister_plugin(dev->plugin.minor); ++ case 2: ++ lirc_buffer_free(dev->plugin.rbuf); ++ case 1: ++ kfree(dev); ++ case 0: ++ ; ++ } ++ ++ return errno; ++} ++ ++static int 
__devinit ite8709_pnp_probe(struct pnp_dev *dev, ++ const struct pnp_device_id *dev_id) ++{ ++ struct lirc_plugin *plugin; ++ struct ite8709_device *ite8709_dev; ++ int ret; ++ ++ /* Check resources validity */ ++ if (!pnp_irq_valid(dev, 0)) ++ return ite8709_cleanup(NULL, 0, -ENODEV, "invalid IRQ"); ++ if (!pnp_port_valid(dev, 2)) ++ return ite8709_cleanup(NULL, 0, -ENODEV, "invalid IO port"); ++ ++ /* Allocate memory for device struct */ ++ ite8709_dev = kzalloc(sizeof(struct ite8709_device), GFP_KERNEL); ++ if (ite8709_dev == NULL) ++ return ite8709_cleanup(NULL, 0, -ENOMEM, "kzalloc failed"); ++ pnp_set_drvdata(dev, ite8709_dev); ++ ++ /* Initialize device struct */ ++ ite8709_dev->use_count = 0; ++ ite8709_dev->irq = pnp_irq(dev, 0); ++ ite8709_dev->io = pnp_port_start(dev, 2); ++ ite8709_dev->hardware_lock = __SPIN_LOCK_UNLOCKED( ++ ite8709_dev->hardware_lock); ++ ite8709_dev->acc_pulse = 0; ++ ite8709_dev->acc_space = 0; ++ ite8709_dev->lastbit = 0; ++ do_gettimeofday(&ite8709_dev->last_tv); ++ tasklet_init(&ite8709_dev->tasklet, ite8709_rearm_irq, ++ (long) ite8709_dev); ++ ite8709_dev->force_rearm = 0; ++ ite8709_dev->rearmed = 0; ++ ite8709_dev->device_busy = 0; ++ ++ /* Initialize plugin struct */ ++ plugin = &ite8709_dev->plugin; ++ strcpy(plugin->name, LIRC_DRIVER_NAME); ++ plugin->minor = -1; ++ plugin->code_length = sizeof(int) * 8; ++ plugin->sample_rate = 0; ++ plugin->features = LIRC_CAN_REC_MODE2; ++ plugin->data = ite8709_dev; ++ plugin->add_to_buf = NULL; ++ plugin->get_queue = NULL; ++ plugin->rbuf = &ite8709_dev->buffer; ++ plugin->set_use_inc = ite8709_set_use_inc; ++ plugin->set_use_dec = ite8709_set_use_dec; ++ plugin->ioctl = NULL; ++ plugin->fops = NULL; ++ plugin->dev = &dev->dev; ++ plugin->owner = THIS_MODULE; ++ ++ /* Initialize LIRC buffer */ ++ if (lirc_buffer_init(plugin->rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) ++ return ite8709_cleanup(ite8709_dev, 1, -ENOMEM, ++ "lirc_buffer_init() failed"); ++ ++ /* Register LIRC plugin */ ++ ret = 
lirc_register_plugin(plugin); ++ if (ret < 0) ++ return ite8709_cleanup(ite8709_dev, 2, ret, ++ "lirc_register_plugin() failed"); ++ ++ /* Reserve I/O port access */ ++ if (!request_region(ite8709_dev->io, 2, LIRC_DRIVER_NAME)) ++ return ite8709_cleanup(ite8709_dev, 3, -EBUSY, ++ "i/o port already in use"); ++ ++ /* Reserve IRQ line */ ++ ret = request_irq(ite8709_dev->irq, ite8709_interrupt, 0, ++ LIRC_DRIVER_NAME, ite8709_dev); ++ if (ret < 0) ++ return ite8709_cleanup(ite8709_dev, 4, ret, ++ "IRQ already in use"); ++ ++ /* Initialize hardware */ ++ ite8709_drop_hardware(ite8709_dev); /* Shutdown hw until first use */ ++ ++ printk(KERN_INFO LIRC_DRIVER_NAME ": device found : irq=%d io=0x%x\n", ++ ite8709_dev->irq, ite8709_dev->io); ++ ++ return 0; ++} ++ ++static void __devexit ite8709_pnp_remove(struct pnp_dev *dev) ++{ ++ struct ite8709_device *ite8709_dev; ++ ite8709_dev = pnp_get_drvdata(dev); ++ ++ ite8709_cleanup(ite8709_dev, 6, 0, NULL); ++ ++ printk(KERN_INFO LIRC_DRIVER_NAME ": device removed\n"); ++} ++ ++#ifdef CONFIG_PM ++static int ite8709_pnp_suspend(struct pnp_dev *dev, pm_message_t state) ++{ ++ struct ite8709_device *ite8709_dev; ++ ite8709_dev = pnp_get_drvdata(dev); ++ ++ if (ite8709_dev->use_count > 0) ++ ite8709_drop_hardware(ite8709_dev); ++ ++ return 0; ++} ++ ++static int ite8709_pnp_resume(struct pnp_dev *dev) ++{ ++ struct ite8709_device *ite8709_dev; ++ ite8709_dev = pnp_get_drvdata(dev); ++ ++ if (ite8709_dev->use_count > 0) ++ ite8709_init_hardware(ite8709_dev); ++ ++ return 0; ++} ++#else ++#define ite8709_pnp_suspend NULL ++#define ite8709_pnp_resume NULL ++#endif ++ ++static const struct pnp_device_id pnp_dev_table[] = { ++ {"ITE8709", 0}, ++ {} ++}; ++ ++MODULE_DEVICE_TABLE(pnp, pnp_dev_table); ++ ++static struct pnp_driver ite8709_pnp_driver = { ++ .name = LIRC_DRIVER_NAME, ++ .probe = ite8709_pnp_probe, ++ .remove = __devexit_p(ite8709_pnp_remove), ++ .suspend = ite8709_pnp_suspend, ++ .resume = ite8709_pnp_resume, ++ .id_table 
= pnp_dev_table, ++}; ++ ++int init_module(void) ++{ ++ return pnp_register_driver(&ite8709_pnp_driver); ++} ++ ++void cleanup_module(void) ++{ ++ pnp_unregister_driver(&ite8709_pnp_driver); ++} ++ ++MODULE_DESCRIPTION("LIRC driver for ITE8709 CIR port"); ++MODULE_AUTHOR("Grégory Lardière"); ++MODULE_LICENSE("GPL"); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++/* ++ * Overrides for Emacs so that we follow Linus's tabbing style. ++ * --------------------------------------------------------------------------- ++ * Local variables: ++ * c-basic-offset: 8 ++ * End: ++ */ +diff --git a/drivers/input/lirc/lirc_mceusb.c b/drivers/input/lirc/lirc_mceusb.c +new file mode 100644 +index 0000000..f1874f3 +--- /dev/null ++++ b/drivers/input/lirc/lirc_mceusb.c +@@ -0,0 +1,890 @@ ++/* ++ * USB Microsoft IR Transceiver driver - 0.2 ++ * ++ * Copyright (c) 2003-2004 Dan Conti (dconti@acm.wwu.edu) ++ * ++ * This driver is based on the USB skeleton driver packaged with the ++ * kernel, and the notice from that package has been retained below. ++ * ++ * The Microsoft IR Transceiver is a neat little IR receiver with two ++ * emitters on it designed for Windows Media Center. This driver might ++ * work for all media center remotes, but I have only tested it with ++ * the philips model. The first revision of this driver only supports ++ * the receive function - the transmit function will be much more ++ * tricky due to the nature of the hardware. Microsoft chose to build ++ * this device inexpensively, therefore making it extra dumb. ++ * There is no interrupt endpoint on this device; all usb traffic ++ * happens over two bulk endpoints. As a result of this, poll() for ++ * this device is an actual hardware poll (instead of a receive queue ++ * check) and is rather expensive. ++ * ++ * All trademarks property of their respective owners. 
This driver was ++ * originally based on the USB skeleton driver, although significant ++ * portions of that code have been removed as the driver has evolved. ++ * ++ * 2003_11_11 - Restructured to minimalize code interpretation in the ++ * driver. The normal use case will be with lirc. ++ * ++ * 2004_01_01 - Removed all code interpretation. Generate mode2 data ++ * for passing off to lirc. Cleanup ++ * ++ * 2004_01_04 - Removed devfs handle. Put in a temporary workaround ++ * for a known issue where repeats generate two ++ * sequential spaces (last_was_repeat_gap) ++ * ++ * 2004_02_17 - Changed top level api to no longer use fops, and ++ * instead use new interface for polling via ++ * lirc_thread. Restructure data read/mode2 generation to ++ * a single pass, reducing number of buffers. Rev to .2 ++ * ++ * 2004_02_27 - Last of fixups to plugin->add_to_buf API. Properly ++ * handle broken fragments from the receiver. Up the ++ * sample rate and remove any pacing from ++ * fetch_more_data. Fixes all known issues. ++ * ++ * TODO ++ * - Fix up minor number, registration of major/minor with usb subsystem ++ * ++ */ ++/* ++ * USB Skeleton driver - 1.1 ++ * ++ * Copyright (C) 2001-2003 Greg Kroah-Hartman (greg@kroah.com) ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation, version 2. ++ * ++ * ++ * This driver is to be used as a skeleton driver to be able to create a ++ * USB driver quickly. The design of it is based on the usb-serial and ++ * dc2xx drivers. ++ * ++ * Thanks to Oliver Neukum, David Brownell, and Alan Stern for their help ++ * in debugging this driver. ++ * ++ * ++ * History: ++ * ++ * 2003-05-06 - 1.1 - changes due to usb core changes with usb_register_dev() ++ * 2003-02-25 - 1.0 - fix races involving urb->status, unlink_urb(), and ++ * disconnect. Fix transfer amount in read(). 
Use ++ * macros instead of magic numbers in probe(). Change ++ * size variables to size_t. Show how to eliminate ++ * DMA bounce buffer. ++ * 2002_12_12 - 0.9 - compile fixes and got rid of fixed minor array. ++ * 2002_09_26 - 0.8 - changes due to USB core conversion to struct device ++ * driver. ++ * 2002_02_12 - 0.7 - zero out dev in probe function for devices that do ++ * not have both a bulk in and bulk out endpoint. ++ * Thanks to Holger Waechtler for the fix. ++ * 2001_11_05 - 0.6 - fix minor locking problem in skel_disconnect. ++ * Thanks to Pete Zaitcev for the fix. ++ * 2001_09_04 - 0.5 - fix devfs bug in skel_disconnect. Thanks to wim delvaux ++ * 2001_08_21 - 0.4 - more small bug fixes. ++ * 2001_05_29 - 0.3 - more bug fixes based on review from linux-usb-devel ++ * 2001_05_24 - 0.2 - bug fixes based on review from linux-usb-devel people ++ * 2001_05_01 - 0.1 - first version ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_USB_DEBUG ++static int debug = 1; ++#else ++static int debug; ++#endif ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++/* Use our own dbg macro */ ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG __FILE__ ": " \ ++ fmt "\n", ## args); \ ++ } while (0) ++ ++/* Version Information */ ++#define DRIVER_VERSION "v0.2" ++#define DRIVER_AUTHOR "Dan Conti, dconti@acm.wwu.edu" ++#define DRIVER_DESC "USB Microsoft IR Transceiver Driver" ++#define DRIVER_NAME "lirc_mceusb" ++ ++/* Define these values to match your device */ ++#define USB_MCEUSB_VENDOR_ID 0x045e ++#define USB_MCEUSB_PRODUCT_ID 0x006d ++ ++/* table of devices that work with this driver */ ++static struct usb_device_id mceusb_table[] = { ++ { USB_DEVICE(USB_MCEUSB_VENDOR_ID, USB_MCEUSB_PRODUCT_ID) }, ++ { } /* Terminating entry */ ++}; ++ ++/* we can have up to this number of device plugged in at once */ ++#define MAX_DEVICES 16 ++ ++/* Structure to hold all of our device specific stuff */ ++struct usb_skel { ++ struct usb_device *udev; /* save off the usb device pointer */ ++ struct usb_interface *interface; /* the interface for this device */ ++ unsigned char minor; /* the starting minor number for this device */ ++ unsigned char num_ports; /* the number of ports this device has */ ++ char num_interrupt_in; /* number of interrupt in endpoints */ ++ char num_bulk_in; /* number of bulk in endpoints */ ++ char num_bulk_out; /* number of bulk out endpoints */ ++ ++ unsigned char *bulk_in_buffer; /* the buffer to receive data */ ++ int bulk_in_size; /* the size of the receive buffer */ ++ __u8 bulk_in_endpointAddr; /* the address of bulk in endpoint */ ++ ++ unsigned char *bulk_out_buffer; /* the buffer to send data */ ++ int bulk_out_size; /* the size of the send buffer */ ++ struct urb *write_urb; /* the urb used to send data */ ++ __u8 bulk_out_endpointAddr; /* the address of bulk out endpoint */ ++ ++ atomic_t write_busy; /* true iff write urb is busy */ ++ struct completion write_finished; /* wait for the write to finish */ ++ ++ wait_queue_head_t wait_q; /* for timeouts */ ++ int open_count; /* number of times this port has been opened */ ++ struct 
mutex sem; /* locks this structure */ ++ ++ int present; /* if the device is not disconnected */ ++ ++ struct lirc_plugin *plugin; ++ ++ int lircdata[256]; /* place to store data until lirc processes it */ ++ int lircidx; /* current index */ ++ int lirccnt; /* remaining values */ ++ ++ int usb_valid_bytes_in_bulk_buffer; /* leftover data from prior read */ ++ int mce_bytes_left_in_packet; /* for packets split across reads */ ++ ++ /* Value to hold the last received space; 0 if last value ++ * received was a pulse */ ++ int last_space; ++ ++ dma_addr_t dma_in; ++ dma_addr_t dma_out; ++}; ++ ++#define MCE_TIME_UNIT 50 ++ ++/* driver api */ ++static int mceusb_probe(struct usb_interface *interface, ++ const struct usb_device_id *id); ++static void mceusb_disconnect(struct usb_interface *interface); ++static void mceusb_write_bulk_callback(struct urb *urb); ++ ++/* read data from the usb bus; convert to mode2 */ ++static int msir_fetch_more_data(struct usb_skel *dev, int dont_block); ++ ++/* helper functions */ ++static void msir_cleanup(struct usb_skel *dev); ++static void set_use_dec(void *data); ++static int set_use_inc(void *data); ++ ++/* array of pointers to our devices that are currently connected */ ++static struct usb_skel *minor_table[MAX_DEVICES]; ++ ++/* lock to protect the minor_table structure */ ++static DECLARE_MUTEX(minor_table_mutex); ++static void mceusb_setup(struct usb_device *udev); ++ ++/* usb specific object needed to register this driver with the usb subsystem */ ++static struct usb_driver mceusb_driver = { ++ .name = DRIVER_NAME, ++ .probe = mceusb_probe, ++ .disconnect = mceusb_disconnect, ++ .id_table = mceusb_table, ++}; ++ ++ ++/** ++ * usb_mceusb_debug_data ++ */ ++static inline void usb_mceusb_debug_data(const char *function, int size, ++ const unsigned char *data) ++{ ++ int i; ++ if (!debug) ++ return; ++ ++ printk(KERN_DEBUG __FILE__": %s - length = %d, data = ", ++ function, size); ++ for (i = 0; i < size; ++i) ++ printk(KERN_DEBUG 
"%.2x ", data[i]); ++ printk(KERN_DEBUG "\n"); ++} ++ ++/** ++ *mceusb_delete ++ */ ++static inline void mceusb_delete(struct usb_skel *dev) ++{ ++ dprintk("%s", __func__); ++ minor_table[dev->minor] = NULL; ++ usb_buffer_free(dev->udev, dev->bulk_in_size, ++ dev->bulk_in_buffer, dev->dma_in); ++ usb_buffer_free(dev->udev, dev->bulk_out_size, ++ dev->bulk_out_buffer, dev->dma_out); ++ if (dev->write_urb != NULL) ++ usb_free_urb(dev->write_urb); ++ kfree(dev); ++} ++ ++static void mceusb_setup(struct usb_device *udev) ++{ ++ char data[8]; ++ int res; ++ ++ memset(data, 0, 8); ++ ++ /* Get Status */ ++ res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), ++ USB_REQ_GET_STATUS, USB_DIR_IN, ++ 0, 0, data, 2, HZ * 3); ++ ++ /* res = usb_get_status( udev, 0, 0, data ); */ ++ dprintk("%s - res = %d status = 0x%x 0x%x", __func__, ++ res, data[0], data[1]); ++ ++ /* This is a strange one. They issue a set address to the device ++ * on the receive control pipe and expect a certain value pair back ++ */ ++ memset(data, 0, 8); ++ ++ res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), ++ 5, USB_TYPE_VENDOR, 0, 0, ++ data, 2, HZ * 3); ++ dprintk("%s - res = %d, devnum = %d", __func__, res, udev->devnum); ++ dprintk("%s - data[0] = %d, data[1] = %d", __func__, ++ data[0], data[1]); ++ ++ ++ /* set feature */ ++ res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), ++ USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, ++ 0xc04e, 0x0000, NULL, 0, HZ * 3); ++ ++ dprintk("%s - res = %d", __func__, res); ++ ++ /* These two are sent by the windows driver, but stall for ++ * me. 
I dont have an analyzer on the linux side so i can't ++ * see what is actually different and why the device takes ++ * issue with them ++ */ ++#if 0 ++ /* this is some custom control message they send */ ++ res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), ++ 0x04, USB_TYPE_VENDOR, ++ 0x0808, 0x0000, NULL, 0, HZ * 3); ++ ++ dprintk("%s - res = %d", __func__, res); ++ ++ /* this is another custom control message they send */ ++ res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), ++ 0x02, USB_TYPE_VENDOR, ++ 0x0000, 0x0100, NULL, 0, HZ * 3); ++ ++ dprintk("%s - res = %d", __func__, res); ++#endif ++} ++ ++static void msir_cleanup(struct usb_skel *dev) ++{ ++ memset(dev->bulk_in_buffer, 0, dev->bulk_in_size); ++ ++ dev->usb_valid_bytes_in_bulk_buffer = 0; ++ ++ dev->last_space = PULSE_MASK; ++ ++ dev->mce_bytes_left_in_packet = 0; ++ dev->lircidx = 0; ++ dev->lirccnt = 0; ++ memset(dev->lircdata, 0, sizeof(dev->lircdata)); ++} ++ ++static int set_use_inc(void *data) ++{ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++} ++ ++/* ++ * msir_fetch_more_data ++ * ++ * The goal here is to read in more remote codes from the remote. In ++ * the event that the remote isn't sending us anything, the caller ++ * will block until a key is pressed (i.e. 
this performs phys read, ++ * filtering, and queueing of data) unless dont_block is set to 1; in ++ * this situation, it will perform a few reads and will exit out if it ++ * does not see any appropriate data ++ * ++ * dev->sem should be locked when this function is called - fine grain ++ * locking isn't really important here anyways ++ * ++ * This routine always returns the number of words available ++ * ++ */ ++static int msir_fetch_more_data(struct usb_skel *dev, int dont_block) ++{ ++ int retries = 0; ++ int words_to_read = ++ (sizeof(dev->lircdata)/sizeof(int)) - dev->lirccnt; ++ int partial, this_read = 0; ++ int bulkidx = 0; ++ int bytes_left_in_packet = 0; ++ signed char *signedp = (signed char *)dev->bulk_in_buffer; ++ ++ if (words_to_read == 0) ++ return dev->lirccnt; ++ ++ /* this forces all existing data to be read by lirc before we ++ * issue another usb command. this is the only form of ++ * throttling we have ++ */ ++ if (dev->lirccnt) ++ return dev->lirccnt; ++ ++ /* reserve room for our leading space */ ++ if (dev->last_space) ++ words_to_read--; ++ ++ while (words_to_read) { ++ /* handle signals and USB disconnects */ ++ if (signal_pending(current)) ++ return dev->lirccnt ? 
dev->lirccnt : -EINTR; ++ if (!dev->udev) ++ return -ENODEV; ++ ++ bulkidx = 0; ++ ++ /* ++ * perform data read (phys or from previous buffer) ++ */ ++ ++ /* use leftovers if present, otherwise perform a read */ ++ if (dev->usb_valid_bytes_in_bulk_buffer) { ++ this_read = dev->usb_valid_bytes_in_bulk_buffer; ++ partial = this_read; ++ dev->usb_valid_bytes_in_bulk_buffer = 0; ++ } else { ++ int retval; ++ ++ this_read = dev->bulk_in_size; ++ partial = 0; ++ retval = usb_bulk_msg(dev->udev, ++ usb_rcvbulkpipe(dev->udev, ++ dev->bulk_in_endpointAddr), ++ (unsigned char *)dev->bulk_in_buffer, ++ this_read, &partial, HZ*10); ++ ++ /* retry a few times on overruns; map all ++ other errors to -EIO */ ++ if (retval) { ++ if (retval == -EOVERFLOW && retries < 5) { ++ retries++; ++ interruptible_sleep_on_timeout( ++ &dev->wait_q, HZ); ++ continue; ++ } else ++ return -EIO; ++ } ++ ++ retries = 0; ++ if (partial) ++ this_read = partial; ++ ++ /* skip the header */ ++ bulkidx += 2; ++ ++ /* check for empty reads (header only) */ ++ if (this_read == 2) { ++ /* assume no data */ ++ if (dont_block) ++ break; ++ ++ /* sleep for a bit before performing ++ another read */ ++ interruptible_sleep_on_timeout(&dev->wait_q, 1); ++ continue; ++ } ++ } ++ ++ /* ++ * process data ++ */ ++ ++ /* at this point this_read is > 0 */ ++ while (bulkidx < this_read && ++ (words_to_read > (dev->last_space ? 1 : 0))) { ++ /* while( bulkidx < this_read && words_to_read) */ ++ int keycode; ++ int pulse = 0; ++ ++ /* read packet length if needed */ ++ if (!bytes_left_in_packet) { ++ /* we assume we are on a packet length ++ * value. 
it is possible, in some ++ * cases, to get a packet that does ++ * not start with a length, apparently ++ * due to some sort of fragmenting, ++ * but occaisonally we do not receive ++ * the second half of a fragment ++ */ ++ bytes_left_in_packet = ++ 128 + signedp[bulkidx++]; ++ ++ /* unfortunately rather than keep all ++ * the data in the packetized format, ++ * the transceiver sends a trailing 8 ++ * bytes that aren't part of the ++ * transmittion from the remote, ++ * aren't packetized, and dont really ++ * have any value. we can basically ++ * tell we have hit them if 1) we have ++ * a loooong space currently stored ++ * up, and 2) the bytes_left value for ++ * this packet is obviously wrong ++ */ ++ if (bytes_left_in_packet > 4) { ++ if (dev->mce_bytes_left_in_packet) { ++ bytes_left_in_packet = ++ dev->mce_bytes_left_in_packet; ++ bulkidx--; ++ } ++ bytes_left_in_packet = 0; ++ bulkidx = this_read; ++ } ++ ++ /* always clear this if we have a ++ valid packet */ ++ dev->mce_bytes_left_in_packet = 0; ++ ++ /* continue here to verify we haven't ++ hit the end of the bulk_in */ ++ continue; ++ ++ } ++ ++ /* ++ * generate mode2 ++ */ ++ ++ keycode = signedp[bulkidx++]; ++ if (keycode < 0) { ++ pulse = 1; ++ keycode += 128; ++ } ++ keycode *= MCE_TIME_UNIT; ++ ++ bytes_left_in_packet--; ++ ++ if (pulse) { ++ if (dev->last_space) { ++ dev->lircdata[dev->lirccnt++] = ++ dev->last_space; ++ dev->last_space = 0; ++ words_to_read--; ++ ++ /* clear for the pulse */ ++ dev->lircdata[dev->lirccnt] = 0; ++ } ++ dev->lircdata[dev->lirccnt] += keycode; ++ dev->lircdata[dev->lirccnt] |= PULSE_BIT; ++ } else { ++ /* on pulse->space transition, add one ++ for the existing pulse */ ++ if (dev->lircdata[dev->lirccnt] && ++ !dev->last_space) { ++ dev->lirccnt++; ++ words_to_read--; ++ } ++ ++ dev->last_space += keycode; ++ } ++ } ++ } ++ ++ /* save off some info if we are exiting mid-packet, or with ++ leftovers */ ++ if (bytes_left_in_packet) ++ dev->mce_bytes_left_in_packet = 
bytes_left_in_packet; ++ if (bulkidx < this_read) { ++ dev->usb_valid_bytes_in_bulk_buffer = (this_read - bulkidx); ++ memcpy(dev->bulk_in_buffer, &(dev->bulk_in_buffer[bulkidx]), ++ dev->usb_valid_bytes_in_bulk_buffer); ++ } ++ return dev->lirccnt; ++} ++ ++/* mceusb_add_to_buf: called by lirc_dev to fetch all available keys ++ * this is used as a polling interface for us: since we set ++ * plugin->sample_rate we will periodically get the below call to ++ * check for new data returns 0 on success, or -ENODATA if nothing is ++ * available ++ */ ++static int mceusb_add_to_buf(void *data, struct lirc_buffer *buf) ++{ ++ struct usb_skel *dev = (struct usb_skel *) data; ++ ++ mutex_lock(&dev->sem); ++ ++ /* verify device still present */ ++ if (dev->udev == NULL) { ++ mutex_unlock(&dev->sem); ++ return -ENODEV; ++ } ++ ++ if (!dev->lirccnt) { ++ int res; ++ dev->lircidx = 0; ++ ++ res = msir_fetch_more_data(dev, 1); ++ ++ if (res == 0) ++ res = -ENODATA; ++ if (res < 0) { ++ mutex_unlock(&dev->sem); ++ return res; ++ } ++ } ++ ++ if (dev->lirccnt) { ++ int keys_to_copy; ++ ++ /* determine available buffer space and available data */ ++ keys_to_copy = lirc_buffer_available(buf); ++ if (keys_to_copy > dev->lirccnt) ++ keys_to_copy = dev->lirccnt; ++ ++ lirc_buffer_write_n(buf, ++ (unsigned char *) &(dev->lircdata[dev->lircidx]), ++ keys_to_copy); ++ dev->lircidx += keys_to_copy; ++ dev->lirccnt -= keys_to_copy; ++ ++ mutex_unlock(&dev->sem); ++ return 0; ++ } ++ ++ mutex_unlock(&dev->sem); ++ return -ENODATA; ++} ++ ++/** ++ * mceusb_write_bulk_callback ++ */ ++static void mceusb_write_bulk_callback(struct urb *urb) ++{ ++ struct usb_skel *dev = (struct usb_skel *)urb->context; ++ ++ dprintk("%s - minor %d", __func__, dev->minor); ++ ++ if ((urb->status != -ENOENT) && ++ (urb->status != -ECONNRESET)) { ++ dprintk("%s - nonzero write buld status received: %d", ++ __func__, urb->status); ++ return; ++ } ++ ++ return; ++} ++ ++/** ++ * mceusb_probe ++ * ++ * Called by the 
usb core when a new device is connected that it ++ * thinks this driver might be interested in. ++ */ ++static int mceusb_probe(struct usb_interface *interface, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *udev = interface_to_usbdev(interface); ++ struct usb_host_interface *iface_desc; ++ struct usb_skel *dev = NULL; ++ struct usb_endpoint_descriptor *endpoint; ++ ++ struct lirc_plugin *plugin; ++ struct lirc_buffer *rbuf; ++ ++ int minor; ++ size_t buffer_size; ++ int i; ++ int retval = -ENOMEM; ++ char junk[64]; ++ int partial = 0; ++ ++ /* See if the device offered us matches what we can accept */ ++ if (cpu_to_le16(udev->descriptor.idVendor) != USB_MCEUSB_VENDOR_ID || ++ cpu_to_le16(udev->descriptor.idProduct) != USB_MCEUSB_PRODUCT_ID) { ++ dprintk("Wrong Vendor/Product IDs"); ++ return -ENODEV; ++ } ++ ++ /* select a "subminor" number (part of a minor number) */ ++ down(&minor_table_mutex); ++ for (minor = 0; minor < MAX_DEVICES; ++minor) { ++ if (minor_table[minor] == NULL) ++ break; ++ } ++ if (minor >= MAX_DEVICES) { ++ info("Too many devices plugged in, " ++ "can not handle this device."); ++ goto error; ++ } ++ ++ /* allocate memory for our device state and initialize it */ ++ dev = kmalloc(sizeof(struct usb_skel), GFP_KERNEL); ++ if (dev == NULL) { ++ err("Out of memory"); ++ retval = -ENOMEM; ++ goto error; ++ } ++ minor_table[minor] = dev; ++ ++ memset(dev, 0x00, sizeof(*dev)); ++ mutex_init(&dev->sem); ++ dev->udev = udev; ++ dev->interface = interface; ++ dev->minor = minor; ++ ++ /* set up the endpoint information */ ++ /* check out the endpoints */ ++ /* use only the first bulk-in and bulk-out endpoints */ ++ iface_desc = interface->cur_altsetting; ++ ++ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { ++ endpoint = &iface_desc->endpoint[i].desc; ++ if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) && ++ ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == ++ USB_ENDPOINT_XFER_BULK)) { ++ dprintk("we found a bulk 
in endpoint"); ++ buffer_size = endpoint->wMaxPacketSize; ++ dev->bulk_in_size = buffer_size; ++ dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; ++ dev->bulk_in_buffer = ++ usb_buffer_alloc(udev, buffer_size, ++ GFP_ATOMIC, &dev->dma_in); ++ if (!dev->bulk_in_buffer) { ++ err("Couldn't allocate bulk_in_buffer"); ++ goto error; ++ } ++ } ++ ++ if (((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ++ == 0x00) ++ && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == ++ USB_ENDPOINT_XFER_BULK)) { ++ dprintk("we found a bulk out endpoint"); ++ dev->write_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!dev->write_urb) { ++ err("No free urbs available"); ++ goto error; ++ } ++ buffer_size = endpoint->wMaxPacketSize; ++ dev->bulk_out_size = buffer_size; ++ dev->bulk_out_endpointAddr = endpoint->bEndpointAddress; ++ dev->bulk_out_buffer = ++ usb_buffer_alloc(udev, buffer_size, ++ GFP_ATOMIC, &dev->dma_out); ++ if (!dev->bulk_out_buffer) { ++ err("Couldn't allocate bulk_out_buffer"); ++ goto error; ++ } ++ usb_fill_bulk_urb(dev->write_urb, udev, ++ usb_sndbulkpipe ++ (udev, endpoint->bEndpointAddress), ++ dev->bulk_out_buffer, buffer_size, ++ mceusb_write_bulk_callback, dev); ++ } ++ } ++ ++ if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) { ++ err("Couldn't find both bulk-in and bulk-out endpoints"); ++ goto error; ++ } ++ ++ /* init the waitq */ ++ init_waitqueue_head(&dev->wait_q); ++ ++ ++ /* Set up our lirc plugin */ ++ plugin = kmalloc(sizeof(struct lirc_plugin), GFP_KERNEL); ++ if (!plugin) { ++ err("out of memory"); ++ goto error; ++ } ++ memset(plugin, 0, sizeof(struct lirc_plugin)); ++ ++ rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!rbuf) { ++ err("out of memory"); ++ kfree(plugin); ++ goto error; ++ } ++ ++ /* the lirc_atiusb module doesn't memset rbuf here ... ? 
*/ ++ if (lirc_buffer_init(rbuf, sizeof(int), 128)) { ++ err("out of memory"); ++ kfree(plugin); ++ kfree(rbuf); ++ goto error; ++ } ++ ++ strcpy(plugin->name, DRIVER_NAME " "); ++ plugin->minor = minor; ++ plugin->code_length = sizeof(int) * 8; ++ plugin->features = LIRC_CAN_REC_MODE2; /* | LIRC_CAN_SEND_MODE2; */ ++ plugin->data = dev; ++ plugin->rbuf = rbuf; ++ plugin->ioctl = NULL; ++ plugin->set_use_inc = &set_use_inc; ++ plugin->set_use_dec = &set_use_dec; ++ plugin->sample_rate = 80; /* sample at 100hz (10ms) */ ++ plugin->add_to_buf = &mceusb_add_to_buf; ++ /* plugin->fops = &mceusb_fops; */ ++ plugin->dev = &udev->dev; ++ plugin->owner = THIS_MODULE; ++ if (lirc_register_plugin(plugin) < 0) { ++ kfree(plugin); ++ lirc_buffer_free(rbuf); ++ kfree(rbuf); ++ goto error; ++ } ++ dev->plugin = plugin; ++ ++ /* clear off the first few messages. these look like ++ * calibration or test data, i can't really tell ++ * this also flushes in case we have random ir data queued up ++ */ ++ for (i = 0; i < 40; i++) ++ (void) usb_bulk_msg(udev, ++ usb_rcvbulkpipe(udev, ++ dev->bulk_in_endpointAddr), ++ junk, 64, &partial, HZ*10); ++ ++ msir_cleanup(dev); ++ mceusb_setup(udev); ++ ++ /* we can register the device now, as it is ready */ ++ usb_set_intfdata(interface, dev); ++ /* let the user know what node this device is now attached to */ ++ /* info("USB Microsoft IR Transceiver device now attached to msir%d", ++ dev->minor); */ ++ up(&minor_table_mutex); ++ return 0; ++error: ++ mceusb_delete(dev); ++ dev = NULL; ++ dprintk("%s: retval = %x", __func__, retval); ++ up(&minor_table_mutex); ++ return retval; ++} ++ ++/** ++ * mceusb_disconnect ++ * ++ * Called by the usb core when the device is removed from the system. ++ * ++ * This routine guarantees that the driver will not submit any more urbs ++ * by clearing dev->udev. It is also supposed to terminate any currently ++ * active urbs. 
Unfortunately, usb_bulk_msg(), used in skel_read(), does ++ * not provide any way to do this. But at least we can cancel an active ++ * write. ++ */ ++static void mceusb_disconnect(struct usb_interface *interface) ++{ ++ struct usb_skel *dev; ++ int minor; ++ dev = usb_get_intfdata(interface); ++ usb_set_intfdata(interface, NULL); ++ ++ down(&minor_table_mutex); ++ mutex_lock(&dev->sem); ++ minor = dev->minor; ++ ++ /* unhook lirc things */ ++ lirc_unregister_plugin(minor); ++ lirc_buffer_free(dev->plugin->rbuf); ++ kfree(dev->plugin->rbuf); ++ kfree(dev->plugin); ++ /* terminate an ongoing write */ ++ if (atomic_read(&dev->write_busy)) { ++ usb_kill_urb(dev->write_urb); ++ wait_for_completion(&dev->write_finished); ++ } ++ ++ /* prevent device read, write and ioctl */ ++ dev->present = 0; ++ ++ mceusb_delete(dev); ++ ++ info("Microsoft IR Transceiver #%d now disconnected", minor); ++ mutex_unlock(&dev->sem); ++ up(&minor_table_mutex); ++} ++ ++ ++ ++/** ++ * usb_mceusb_init ++ */ ++static int __init usb_mceusb_init(void) ++{ ++ int result; ++ ++ /* register this driver with the USB subsystem */ ++ result = usb_register(&mceusb_driver); ++ if (result) { ++ err("usb_register failed for the " DRIVER_NAME ++ " driver. 
error number %d", result); ++ return result; ++ } ++ ++ info(DRIVER_DESC " " DRIVER_VERSION); ++ return 0; ++} ++ ++ ++/** ++ * usb_mceusb_exit ++ */ ++static void __exit usb_mceusb_exit(void) ++{ ++ /* deregister this driver with the USB subsystem */ ++ usb_deregister(&mceusb_driver); ++} ++ ++#ifdef MODULE ++module_init(usb_mceusb_init); ++module_exit(usb_mceusb_exit); ++ ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_LICENSE("GPL"); ++MODULE_DEVICE_TABLE(usb, mceusb_table); ++ ++module_param(debug, int, 0644); ++MODULE_PARM_DESC(debug, "Debug enabled or not"); ++ ++#else /* not MODULE */ ++subsys_initcall(usb_mceusb_init); ++ ++#endif /* MODULE */ +diff --git a/drivers/input/lirc/lirc_mceusb2.c b/drivers/input/lirc/lirc_mceusb2.c +new file mode 100644 +index 0000000..2f89238 +--- /dev/null ++++ b/drivers/input/lirc/lirc_mceusb2.c +@@ -0,0 +1,1124 @@ ++/* ++ * LIRC driver for Philips eHome USB Infrared Transceiver ++ * and the Microsoft MCE 2005 Remote Control ++ * ++ * (C) by Martin A. Blatter ++ * ++ * Transmitter support and reception code cleanup. ++ * (C) by Daniel Melander ++ * ++ * Derived from ATI USB driver by Paul Miller and the original ++ * MCE USB driver by Dan Corti ++ * ++ * This driver will only work reliably with kernel version 2.6.10 ++ * or higher, probably because of differences in USB device enumeration ++ * in the kernel code. Device initialization fails most of the time ++ * with earlier kernel versions. ++ * ++ ********************************************************************** ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++#define DRIVER_VERSION "1.48" ++#define DRIVER_AUTHOR "Daniel Melander , " \ ++ "Martin Blatter " ++#define DRIVER_DESC "Philips eHome USB IR Transceiver and Microsoft " \ ++ "MCE 2005 Remote Control driver for LIRC" ++#define DRIVER_NAME "lirc_mceusb2" ++ ++#define USB_BUFLEN 16 /* USB reception buffer length */ ++#define LIRCBUF_SIZE 256 /* LIRC work buffer length */ ++ ++/* MCE constants */ ++#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */ ++#define MCE_TIME_UNIT 50 /* Approx 50us resolution */ ++#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */ ++#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */ ++#define MCE_PACKET_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */ ++#define MCE_CONTROL_HEADER 0x9F /* MCE status header */ ++#define MCE_TX_HEADER_LENGTH 3 /* # of bytes in the initializing tx header */ ++#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? 
*/ ++#define MCE_DEFAULT_TX_MASK 0x03 /* Val opts: TX1=0x01, TX2=0x02, ALL=0x03 */ ++#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */ ++#define MCE_PULSE_MASK 0x7F /* Pulse mask */ ++#define MCE_MAX_PULSE_LENGTH 0x7F /* Longest transmittable pulse symbol */ ++#define MCE_PACKET_LENGTH_MASK 0x7F /* Pulse mask */ ++ ++ ++/* module parameters */ ++#ifdef CONFIG_USB_DEBUG ++static int debug = 1; ++#else ++static int debug; ++#endif ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG fmt, ## args); \ ++ } while (0) ++ ++/* lock irctl structure */ ++#define IRLOCK mutex_lock(&ir->lock) ++#define IRUNLOCK mutex_unlock(&ir->lock) ++ ++/* general constants */ ++#define SUCCESS 0 ++#define SEND_FLAG_IN_PROGRESS 1 ++#define SEND_FLAG_COMPLETE 2 ++#define RECV_FLAG_IN_PROGRESS 3 ++#define RECV_FLAG_COMPLETE 4 ++ ++#define PHILUSB_INBOUND 1 ++#define PHILUSB_OUTBOUND 2 ++ ++#define VENDOR_PHILIPS 0x0471 ++#define VENDOR_SMK 0x0609 ++#define VENDOR_TATUNG 0x1460 ++#define VENDOR_GATEWAY 0x107b ++#define VENDOR_SHUTTLE 0x1308 ++#define VENDOR_SHUTTLE2 0x051c ++#define VENDOR_MITSUMI 0x03ee ++#define VENDOR_TOPSEED 0x1784 ++#define VENDOR_RICAVISION 0x179d ++#define VENDOR_ITRON 0x195d ++#define VENDOR_FIC 0x1509 ++#define VENDOR_LG 0x043e ++#define VENDOR_MICROSOFT 0x045e ++#define VENDOR_FORMOSA 0x147a ++#define VENDOR_FINTEK 0x1934 ++#define VENDOR_PINNACLE 0x2304 ++#define VENDOR_ECS 0x1019 ++ ++static struct usb_device_id usb_remote_table[] = { ++ /* Philips eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x0815) }, ++ /* Philips Infrared Transceiver - HP branded */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x060c) }, ++ /* Philips SRM5100 */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x060d) }, ++ /* Philips Infrared Transceiver - Omaura */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x060f) }, ++ /* Philips Infrared Transceiver - Spinel plus */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x0613) }, ++ /* SMK/Toshiba G83C0004D410 */ ++ { 
USB_DEVICE(VENDOR_SMK, 0x031d) }, ++ /* SMK eHome Infrared Transceiver (Sony VAIO) */ ++ { USB_DEVICE(VENDOR_SMK, 0x0322) }, ++ /* bundled with Hauppauge PVR-150 */ ++ { USB_DEVICE(VENDOR_SMK, 0x0334) }, ++ /* Tatung eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TATUNG, 0x9150) }, ++ /* Shuttle eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_SHUTTLE, 0xc001) }, ++ /* Shuttle eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_SHUTTLE2, 0xc001) }, ++ /* Gateway eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_GATEWAY, 0x3009) }, ++ /* Mitsumi */ ++ { USB_DEVICE(VENDOR_MITSUMI, 0x2501) }, ++ /* Topseed eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0001) }, ++ /* Topseed HP eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0006) }, ++ /* Topseed eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0007) }, ++ /* Topseed eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0008) }, ++ /* Ricavision internal Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_RICAVISION, 0x0010) }, ++ /* Itron ione Libra Q-11 */ ++ { USB_DEVICE(VENDOR_ITRON, 0x7002) }, ++ /* FIC eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_FIC, 0x9242) }, ++ /* LG eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_LG, 0x9803) }, ++ /* Microsoft MCE Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_MICROSOFT, 0x00a0) }, ++ /* Formosa eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe015) }, ++ /* Formosa21 / eHome Infrared Receiver */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe016) }, ++ /* Formosa aim / Trust MCE Infrared Receiver */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe017) }, ++ /* Formosa Industrial Computing / Beanbag Emulation Device */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe018) }, ++ /* Fintek eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_FINTEK, 0x0602) }, ++ /* Pinnacle Remote Kit */ ++ { USB_DEVICE(VENDOR_PINNACLE, 0x0225) }, ++ /* Elitegroup Computer Systems IR*/ ++ { USB_DEVICE(VENDOR_ECS, 0x0f38) }, ++ /* 
Terminating entry */ ++ { } ++}; ++ ++static struct usb_device_id pinnacle_list[] = { ++ { USB_DEVICE(VENDOR_PINNACLE, 0x0225) }, ++ {} ++}; ++ ++static struct usb_device_id transmitter_mask_list[] = { ++ { USB_DEVICE(VENDOR_SMK, 0x031d) }, ++ { USB_DEVICE(VENDOR_SMK, 0x0322) }, ++ { USB_DEVICE(VENDOR_SMK, 0x0334) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0001) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0007) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0008) }, ++ { USB_DEVICE(VENDOR_PINNACLE, 0x0225) }, ++ {} ++}; ++ ++/* data structure for each usb remote */ ++struct irctl { ++ ++ /* usb */ ++ struct usb_device *usbdev; ++ struct urb *urb_in; ++ int devnum; ++ struct usb_endpoint_descriptor *usb_ep_in; ++ struct usb_endpoint_descriptor *usb_ep_out; ++ ++ /* buffers and dma */ ++ unsigned char *buf_in; ++ unsigned int len_in; ++ dma_addr_t dma_in; ++ dma_addr_t dma_out; ++ ++ /* lirc */ ++ struct lirc_plugin *p; ++ int lircdata; ++ unsigned char is_pulse; ++ struct { ++ u32 connected:1; ++ u32 pinnacle:1; ++ u32 transmitter_mask_inverted:1; ++ u32 reserved:29; ++ } flags; ++ ++ unsigned char transmitter_mask; ++ unsigned int carrier_freq; ++ ++ /* handle sending (init strings) */ ++ int send_flags; ++ wait_queue_head_t wait_out; ++ ++ struct mutex lock; ++}; ++ ++/* init strings */ ++static char init1[] = {0x00, 0xff, 0xaa, 0xff, 0x0b}; ++static char init2[] = {0xff, 0x18}; ++ ++static char pin_init1[] = { 0x9f, 0x07}; ++static char pin_init2[] = { 0x9f, 0x13}; ++static char pin_init3[] = { 0x9f, 0x0d}; ++ ++static void usb_remote_printdata(struct irctl *ir, char *buf, int len) ++{ ++ char codes[USB_BUFLEN*3 + 1]; ++ int i; ++ ++ if (len <= 0) ++ return; ++ ++ for (i = 0; i < len && i < USB_BUFLEN; i++) ++ snprintf(codes+i*3, 4, "%02x ", buf[i] & 0xFF); ++ ++ printk(KERN_INFO "" DRIVER_NAME "[%d]: data received %s (length=%d)\n", ++ ir->devnum, codes, len); ++} ++ ++static void usb_async_callback(struct urb *urb, struct pt_regs *regs) ++{ ++ struct irctl *ir; ++ int len; ++ ++ if 
(!urb) ++ return; ++ ++ ir = urb->context; ++ if (ir) { ++ len = urb->actual_length; ++ ++ dprintk(DRIVER_NAME ++ "[%d]: callback called (status=%d len=%d)\n", ++ ir->devnum, urb->status, len); ++ ++ if (debug) ++ usb_remote_printdata(ir, urb->transfer_buffer, len); ++ } ++ ++} ++ ++ ++/* request incoming or send outgoing usb packet - used to initialize remote */ ++static void request_packet_async(struct irctl *ir, ++ struct usb_endpoint_descriptor *ep, ++ unsigned char *data, int size, int urb_type) ++{ ++ int res; ++ struct urb *async_urb; ++ unsigned char *async_buf; ++ ++ if (urb_type) { ++ async_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (async_urb) { ++ /* alloc buffer */ ++ async_buf = kmalloc(size, GFP_KERNEL); ++ if (async_buf) { ++ if (urb_type == PHILUSB_OUTBOUND) { ++ /* outbound data */ ++ usb_fill_int_urb(async_urb, ir->usbdev, ++ usb_sndintpipe(ir->usbdev, ++ ep->bEndpointAddress), ++ async_buf, ++ size, ++ (usb_complete_t) usb_async_callback, ++ ir, ep->bInterval); ++ ++ memcpy(async_buf, data, size); ++ } else { ++ /* inbound data */ ++ usb_fill_int_urb(async_urb, ir->usbdev, ++ usb_rcvintpipe(ir->usbdev, ++ ep->bEndpointAddress), ++ async_buf, size, ++ (usb_complete_t) usb_async_callback, ++ ir, ep->bInterval); ++ } ++ } else { ++ usb_free_urb(async_urb); ++ return; ++ } ++ } ++ } else { ++ /* standard request */ ++ async_urb = ir->urb_in; ++ ir->send_flags = RECV_FLAG_IN_PROGRESS; ++ } ++ dprintk(DRIVER_NAME "[%d]: receive request called (size=%#x)\n", ++ ir->devnum, size); ++ ++ async_urb->transfer_buffer_length = size; ++ async_urb->dev = ir->usbdev; ++ ++ res = usb_submit_urb(async_urb, GFP_ATOMIC); ++ if (res) { ++ dprintk(DRIVER_NAME "[%d]: receive request FAILED! 
(res=%d)\n", ++ ir->devnum, res); ++ return; ++ } ++ dprintk(DRIVER_NAME "[%d]: receive request complete (res=%d)\n", ++ ir->devnum, res); ++} ++ ++static int unregister_from_lirc(struct irctl *ir) ++{ ++ struct lirc_plugin *p = ir->p; ++ int devnum; ++ int rtn; ++ ++ devnum = ir->devnum; ++ dprintk(DRIVER_NAME "[%d]: unregister from lirc called\n", devnum); ++ ++ rtn = lirc_unregister_plugin(p->minor); ++ if (rtn > 0) { ++ printk(DRIVER_NAME "[%d]: error in lirc_unregister minor: %d\n" ++ "Trying again...\n", devnum, p->minor); ++ if (rtn == -EBUSY) { ++ printk(DRIVER_NAME ++ "[%d]: device is opened, will unregister" ++ " on close\n", devnum); ++ return -EAGAIN; ++ } ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(HZ); ++ ++ rtn = lirc_unregister_plugin(p->minor); ++ if (rtn > 0) ++ printk(DRIVER_NAME "[%d]: lirc_unregister failed\n", ++ devnum); ++ } ++ ++ if (rtn != SUCCESS) { ++ printk(DRIVER_NAME "[%d]: didn't free resources\n", devnum); ++ return -EAGAIN; ++ } ++ ++ printk(DRIVER_NAME "[%d]: usb remote disconnected\n", devnum); ++ ++ lirc_buffer_free(p->rbuf); ++ kfree(p->rbuf); ++ kfree(p); ++ kfree(ir); ++ return SUCCESS; ++} ++ ++static int set_use_inc(void *data) ++{ ++ struct irctl *ir = data; ++ ++ if (!ir) { ++ printk(DRIVER_NAME "[?]: set_use_inc called with no context\n"); ++ return -EIO; ++ } ++ dprintk(DRIVER_NAME "[%d]: set use inc\n", ir->devnum); ++ ++ if (!ir->flags.connected) { ++ if (!ir->usbdev) ++ return -ENOENT; ++ ir->flags.connected = 1; ++ } ++ ++ return SUCCESS; ++} ++ ++static void set_use_dec(void *data) ++{ ++ struct irctl *ir = data; ++ ++ if (!ir) { ++ printk(DRIVER_NAME "[?]: set_use_dec called with no context\n"); ++ return; ++ } ++ dprintk(DRIVER_NAME "[%d]: set use dec\n", ir->devnum); ++ ++ if (ir->flags.connected) { ++ IRLOCK; ++ ir->flags.connected = 0; ++ IRUNLOCK; ++ } ++} ++ ++static void send_packet_to_lirc(struct irctl *ir) ++{ ++ if (ir->lircdata != 0) { ++ lirc_buffer_write_1(ir->p->rbuf, ++ (unsigned 
char *) &ir->lircdata); ++ wake_up(&ir->p->rbuf->wait_poll); ++ ir->lircdata = 0; ++ } ++} ++ ++static void usb_remote_recv(struct urb *urb, struct pt_regs *regs) ++{ ++ struct irctl *ir; ++ int buf_len, packet_len; ++ int i, j; ++ ++ if (!urb) ++ return; ++ ++ ir = urb->context; ++ if (!ir) { ++ usb_unlink_urb(urb); ++ return; ++ } ++ ++ buf_len = urb->actual_length; ++ packet_len = 0; ++ ++ if (debug) ++ usb_remote_printdata(ir, urb->transfer_buffer, buf_len); ++ ++ if (ir->send_flags == RECV_FLAG_IN_PROGRESS) { ++ ir->send_flags = SEND_FLAG_COMPLETE; ++ dprintk(DRIVER_NAME "[%d]: setup answer received %d bytes\n", ++ ir->devnum, buf_len); ++ } ++ ++ switch (urb->status) { ++ /* success */ ++ case SUCCESS: ++ for (i = 0; i < buf_len; i++) { ++ /* decode mce packets of the form (84),AA,BB,CC,DD */ ++ switch (ir->buf_in[i]) { ++ ++ /* data headers */ ++ case 0x90: /* used Pinnacle Remote Kit */ ++ case 0x8F: ++ case 0x8E: ++ case 0x8D: ++ case 0x8C: ++ case 0x8B: ++ case 0x8A: ++ case 0x89: ++ case 0x88: ++ case 0x87: ++ case 0x86: ++ case 0x85: ++ case 0x84: ++ case 0x83: ++ case 0x82: ++ case 0x81: ++ case 0x80: ++ /* decode packet data */ ++ packet_len = ir->buf_in[i] & ++ MCE_PACKET_LENGTH_MASK; ++ for (j = 1; ++ j <= packet_len && (i+j < buf_len); ++ j++) { ++ /* rising/falling flank */ ++ if (ir->is_pulse != ++ (ir->buf_in[i + j] & ++ MCE_PULSE_BIT)) { ++ send_packet_to_lirc(ir); ++ ir->is_pulse = ++ ir->buf_in[i + j] & ++ MCE_PULSE_BIT; ++ } ++ ++ /* accumulate mce pulse/space values */ ++ ir->lircdata += ++ (ir->buf_in[i + j] & ++ MCE_PULSE_MASK)*MCE_TIME_UNIT; ++ ir->lircdata |= ++ (ir->is_pulse ? 
PULSE_BIT : 0); ++ } ++ ++ i += packet_len; ++ break; ++ ++ /* status header (0x9F) */ ++ case MCE_CONTROL_HEADER: ++ /* A transmission containing one or ++ more consecutive ir commands always ++ ends with a GAP of 100ms followed by the ++ sequence 0x9F 0x01 0x01 0x9F 0x15 ++ 0x00 0x00 0x80 */ ++ ++ /* ++ Uncomment this if the last 100ms ++ "infinity"-space should be transmitted ++ to lirc directly instead of at the beginning ++ of the next transmission. Changes pulse/space order. ++ ++ if (++i < buf_len && ir->buf_in[i]==0x01) ++ send_packet_to_lirc(ir); ++ ++ */ ++ ++ /* end decode loop */ ++ i = buf_len; ++ break; ++ default: ++ break; ++ } ++ } ++ ++ break; ++ ++ /* unlink */ ++ case -ECONNRESET: ++ case -ENOENT: ++ case -ESHUTDOWN: ++ usb_unlink_urb(urb); ++ return; ++ ++ case -EPIPE: ++ default: ++ break; ++ } ++ ++ /* resubmit urb */ ++ usb_submit_urb(urb, GFP_ATOMIC); ++} ++ ++ ++static ssize_t lirc_write(struct file *file, const char *buf, ++ size_t n, loff_t *ppos) ++{ ++ int i, count = 0, cmdcount = 0; ++ struct irctl *ir = NULL; ++ int wbuf[LIRCBUF_SIZE]; /* Workbuffer with values from lirc */ ++ unsigned char cmdbuf[MCE_CMDBUF_SIZE]; /* MCE command buffer */ ++ unsigned long signal_duration = 0; /* Singnal length in us */ ++ struct timeval start_time, end_time; ++ ++ do_gettimeofday(&start_time); ++ ++ /* Retrieve lirc_plugin data for the device */ ++ ir = lirc_get_pdata(file); ++ if (!ir && !ir->usb_ep_out) ++ return -EFAULT; ++ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ count = n / sizeof(int); ++ ++ /* Check if command is within limits */ ++ if (count > LIRCBUF_SIZE || count%2 == 0) ++ return -EINVAL; ++ if (copy_from_user(wbuf, buf, n)) ++ return -EFAULT; ++ ++ /* MCE tx init header */ ++ cmdbuf[cmdcount++] = MCE_CONTROL_HEADER; ++ cmdbuf[cmdcount++] = 0x08; ++ cmdbuf[cmdcount++] = ir->transmitter_mask; ++ ++ /* Generate mce packet data */ ++ for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) { ++ signal_duration += wbuf[i]; ++ 
wbuf[i] = wbuf[i] / MCE_TIME_UNIT; ++ ++ do { /* loop to support long pulses/spaces > 127*50us=6.35ms */ ++ ++ /* Insert mce packet header every 4th entry */ ++ if ((cmdcount < MCE_CMDBUF_SIZE) && ++ (cmdcount - MCE_TX_HEADER_LENGTH) % ++ MCE_CODE_LENGTH == 0) ++ cmdbuf[cmdcount++] = MCE_PACKET_HEADER; ++ ++ /* Insert mce packet data */ ++ if (cmdcount < MCE_CMDBUF_SIZE) ++ cmdbuf[cmdcount++] = ++ (wbuf[i] < MCE_PULSE_BIT ? ++ wbuf[i] : MCE_MAX_PULSE_LENGTH) | ++ (i & 1 ? 0x00 : MCE_PULSE_BIT); ++ else ++ return -EINVAL; ++ } while ((wbuf[i] > MCE_MAX_PULSE_LENGTH) && ++ (wbuf[i] -= MCE_MAX_PULSE_LENGTH)); ++ } ++ ++ /* Fix packet length in last header */ ++ cmdbuf[cmdcount - (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH] = ++ 0x80 + (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH - 1; ++ ++ /* Check if we have room for the empty packet at the end */ ++ if (cmdcount >= MCE_CMDBUF_SIZE) ++ return -EINVAL; ++ ++ /* All mce commands end with an empty packet (0x80) */ ++ cmdbuf[cmdcount++] = 0x80; ++ ++ /* Transmit the command to the mce device */ ++ request_packet_async(ir, ir->usb_ep_out, cmdbuf, ++ cmdcount, PHILUSB_OUTBOUND); ++ ++ /* The lircd gap calculation expects the write function to ++ wait the time it takes for the ircommand to be sent before ++ it returns. */ ++ do_gettimeofday(&end_time); ++ signal_duration -= (end_time.tv_usec - start_time.tv_usec) + ++ (end_time.tv_sec - start_time.tv_sec) * 1000000; ++ ++ /* delay with the closest number of ticks */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(usecs_to_jiffies(signal_duration)); ++ ++ return n; ++} ++ ++static void set_transmitter_mask(struct irctl *ir, unsigned int mask) ++{ ++ if (ir->flags.transmitter_mask_inverted) ++ ir->transmitter_mask = (mask != 0x03 ? 
mask ^ 0x03 : mask) << 1; ++ else ++ ir->transmitter_mask = mask; ++} ++ ++ ++/* Sets the send carrier frequency */ ++static int set_send_carrier(struct irctl *ir, int carrier) ++{ ++ int clk = 10000000; ++ int prescaler = 0, divisor = 0; ++ unsigned char cmdbuf[] = { 0x9F, 0x06, 0x01, 0x80 }; ++ ++ /* Carrier is changed */ ++ if (ir->carrier_freq != carrier) { ++ ++ if (carrier <= 0) { ++ ir->carrier_freq = carrier; ++ dprintk(DRIVER_NAME "[%d]: SET_CARRIER disabling " ++ "carrier modulation\n", ir->devnum); ++ request_packet_async(ir, ir->usb_ep_out, ++ cmdbuf, sizeof(cmdbuf), ++ PHILUSB_OUTBOUND); ++ return carrier; ++ } ++ ++ for (prescaler = 0; prescaler < 4; ++prescaler) { ++ divisor = (clk >> (2 * prescaler)) / carrier; ++ if (divisor <= 0xFF) { ++ ir->carrier_freq = carrier; ++ cmdbuf[2] = prescaler; ++ cmdbuf[3] = divisor; ++ dprintk(DRIVER_NAME "[%d]: SET_CARRIER " ++ "requesting %d Hz\n", ++ ir->devnum, carrier); ++ ++ /* Transmit the new carrier to the mce ++ device */ ++ request_packet_async(ir, ir->usb_ep_out, ++ cmdbuf, sizeof(cmdbuf), ++ PHILUSB_OUTBOUND); ++ return carrier; ++ } ++ } ++ ++ return -EINVAL; ++ ++ } ++ ++ return carrier; ++} ++ ++ ++static int lirc_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg) ++{ ++ int result; ++ unsigned int ivalue; ++ unsigned long lvalue; ++ struct irctl *ir = NULL; ++ ++ /* Retrieve lirc_plugin data for the device */ ++ ir = lirc_get_pdata(filep); ++ if (!ir && !ir->usb_ep_out) ++ return -EFAULT; ++ ++ ++ switch (cmd) { ++ case LIRC_SET_TRANSMITTER_MASK: ++ ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ switch (ivalue) { ++ case 0x01: /* Transmitter 1 => 0x04 */ ++ case 0x02: /* Transmitter 2 => 0x02 */ ++ case 0x03: /* Transmitter 1 & 2 => 0x06 */ ++ set_transmitter_mask(ir, ivalue); ++ break; ++ ++ default: /* Unsupported transmitter mask */ ++ return MCE_MAX_CHANNELS; ++ } ++ ++ dprintk(DRIVER_NAME ": SET_TRANSMITTERS mask=%d\n", 
ivalue); ++ break; ++ ++ case LIRC_GET_SEND_MODE: ++ ++ result = put_user(LIRC_SEND2MODE(LIRC_CAN_SEND_PULSE & ++ LIRC_CAN_SEND_MASK), ++ (unsigned long *) arg); ++ ++ if (result) ++ return result; ++ break; ++ ++ case LIRC_SET_SEND_MODE: ++ ++ result = get_user(lvalue, (unsigned long *) arg); ++ ++ if (result) ++ return result; ++ if (lvalue != (LIRC_MODE_PULSE&LIRC_CAN_SEND_MASK)) ++ return -EINVAL; ++ break; ++ ++ case LIRC_SET_SEND_CARRIER: ++ ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ ++ set_send_carrier(ir, ivalue); ++ break; ++ ++ default: ++ return -ENOIOCTLCMD; ++ } ++ ++ return 0; ++} ++ ++static struct file_operations lirc_fops = { ++ .write = lirc_write, ++}; ++ ++ ++static int usb_remote_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = interface_to_usbdev(intf); ++ struct usb_host_interface *idesc; ++ struct usb_endpoint_descriptor *ep = NULL; ++ struct usb_endpoint_descriptor *ep_in = NULL; ++ struct usb_endpoint_descriptor *ep_out = NULL; ++ struct usb_host_config *config; ++ struct irctl *ir = NULL; ++ struct lirc_plugin *plugin = NULL; ++ struct lirc_buffer *rbuf = NULL; ++ int devnum, pipe, maxp; ++ int minor = 0; ++ int i; ++ char buf[63], name[128] = ""; ++ int mem_failure = 0; ++ int is_pinnacle; ++ ++ dprintk(DRIVER_NAME ": usb probe called\n"); ++ ++ usb_reset_device(dev); ++ ++ config = dev->actconfig; ++ ++ idesc = intf->cur_altsetting; ++ ++ is_pinnacle = usb_match_id(intf, pinnacle_list) ? 
1 : 0; ++ ++ /* step through the endpoints to find first bulk in and out endpoint */ ++ for (i = 0; i < idesc->desc.bNumEndpoints; ++i) { ++ ep = &idesc->endpoint[i].desc; ++ ++ if ((ep_in == NULL) ++ && ((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ++ == USB_DIR_IN) ++ && (((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ == USB_ENDPOINT_XFER_BULK) ++ || ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ == USB_ENDPOINT_XFER_INT))) { ++ ++ dprintk(DRIVER_NAME ": acceptable inbound endpoint " ++ "found\n"); ++ ep_in = ep; ++ ep_in->bmAttributes = USB_ENDPOINT_XFER_INT; ++ if (is_pinnacle) ++ /* ++ * setting seems to 1 seem to cause issues with ++ * Pinnacle timing out on transfer. ++ */ ++ ep_in->bInterval = ep->bInterval; ++ else ++ ep_in->bInterval = 1; ++ } ++ ++ if ((ep_out == NULL) ++ && ((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ++ == USB_DIR_OUT) ++ && (((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ == USB_ENDPOINT_XFER_BULK) ++ || ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ == USB_ENDPOINT_XFER_INT))) { ++ ++ dprintk(DRIVER_NAME ": acceptable outbound endpoint " ++ "found\n"); ++ ep_out = ep; ++ ep_out->bmAttributes = USB_ENDPOINT_XFER_INT; ++ if (is_pinnacle) ++ /* ++ * setting seems to 1 seem to cause issues with ++ * Pinnacle timing out on transfer. 
++ */ ++ ep_out->bInterval = ep->bInterval; ++ else ++ ep_out->bInterval = 1; ++ } ++ } ++ if (ep_in == NULL) { ++ dprintk(DRIVER_NAME ": inbound and/or endpoint not found\n"); ++ return -ENODEV; ++ } ++ ++ devnum = dev->devnum; ++ pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress); ++ maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); ++ ++ /* allocate kernel memory */ ++ mem_failure = 0; ++ ir = kmalloc(sizeof(struct irctl), GFP_KERNEL); ++ if (!ir) { ++ mem_failure = 1; ++ goto mem_failure_switch; ++ } ++ ++ memset(ir, 0, sizeof(struct irctl)); ++ ++ plugin = kmalloc(sizeof(struct lirc_plugin), GFP_KERNEL); ++ if (!plugin) { ++ mem_failure = 2; ++ goto mem_failure_switch; ++ } ++ ++ rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!rbuf) { ++ mem_failure = 3; ++ goto mem_failure_switch; ++ } ++ ++ if (lirc_buffer_init(rbuf, sizeof(int), LIRCBUF_SIZE)) { ++ mem_failure = 4; ++ goto mem_failure_switch; ++ } ++ ++ ir->buf_in = usb_buffer_alloc(dev, maxp, GFP_ATOMIC, &ir->dma_in); ++ if (!ir->buf_in) { ++ mem_failure = 5; ++ goto mem_failure_switch; ++ } ++ ++ ir->urb_in = usb_alloc_urb(0, GFP_KERNEL); ++ if (!ir->urb_in) { ++ mem_failure = 7; ++ goto mem_failure_switch; ++ } ++ ++ memset(plugin, 0, sizeof(struct lirc_plugin)); ++ ++ strcpy(plugin->name, DRIVER_NAME " "); ++ plugin->minor = -1; ++ plugin->features = LIRC_CAN_SEND_PULSE | ++ LIRC_CAN_SET_TRANSMITTER_MASK | ++ LIRC_CAN_REC_MODE2 | ++ LIRC_CAN_SET_SEND_CARRIER; ++ plugin->data = ir; ++ plugin->rbuf = rbuf; ++ plugin->set_use_inc = &set_use_inc; ++ plugin->set_use_dec = &set_use_dec; ++ plugin->code_length = sizeof(int) * 8; ++ plugin->ioctl = lirc_ioctl; ++ plugin->fops = &lirc_fops; ++ plugin->dev = &dev->dev; ++ plugin->owner = THIS_MODULE; ++ ++ mutex_init(&ir->lock); ++ init_waitqueue_head(&ir->wait_out); ++ ++ minor = lirc_register_plugin(plugin); ++ if (minor < 0) ++ mem_failure = 9; ++ ++mem_failure_switch: ++ ++ /* free allocated memory incase of failure */ ++ switch 
(mem_failure) { ++ case 9: ++ usb_free_urb(ir->urb_in); ++ case 7: ++ usb_buffer_free(dev, maxp, ir->buf_in, ir->dma_in); ++ case 5: ++ lirc_buffer_free(rbuf); ++ case 4: ++ kfree(rbuf); ++ case 3: ++ kfree(plugin); ++ case 2: ++ kfree(ir); ++ case 1: ++ printk(DRIVER_NAME "[%d]: out of memory (code=%d)\n", ++ devnum, mem_failure); ++ return -ENOMEM; ++ } ++ ++ plugin->minor = minor; ++ ir->p = plugin; ++ ir->devnum = devnum; ++ ir->usbdev = dev; ++ ir->len_in = maxp; ++ ir->flags.connected = 0; ++ ir->flags.pinnacle = is_pinnacle; ++ ir->flags.transmitter_mask_inverted = ++ usb_match_id(intf, transmitter_mask_list) ? 0 : 1; ++ ++ ir->lircdata = PULSE_MASK; ++ ir->is_pulse = 0; ++ ++ /* ir->flags.transmitter_mask_inverted must be set */ ++ set_transmitter_mask(ir, MCE_DEFAULT_TX_MASK); ++ /* Saving usb interface data for use by the transmitter routine */ ++ ir->usb_ep_in = ep_in; ++ ir->usb_ep_out = ep_out; ++ ++ if (dev->descriptor.iManufacturer ++ && usb_string(dev, dev->descriptor.iManufacturer, buf, 63) > 0) ++ strncpy(name, buf, 128); ++ if (dev->descriptor.iProduct ++ && usb_string(dev, dev->descriptor.iProduct, buf, 63) > 0) ++ snprintf(name, 128, "%s %s", name, buf); ++ printk(DRIVER_NAME "[%d]: %s on usb%d:%d\n", devnum, name, ++ dev->bus->busnum, devnum); ++ ++ /* inbound data */ ++ usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, ++ maxp, (usb_complete_t) usb_remote_recv, ir, ep_in->bInterval); ++ ++ /* initialize device */ ++ if (ir->flags.pinnacle) { ++ int usbret; ++ ++ /* ++ * I have no idea why but this reset seems to be crucial to ++ * getting the device to do outbound IO correctly - without ++ * this the device seems to hang, ignoring all input - although ++ * IR signals are correctly sent from the device, no input is ++ * interpreted by the device and the host never does the ++ * completion routine ++ */ ++ ++ usbret = usb_reset_configuration(dev); ++ printk(DRIVER_NAME "[%d]: usb reset config ret %x\n", ++ devnum, usbret); ++ ++ /* ++ * its 
possible we really should wait for a return ++ * for each of these... ++ */ ++ request_packet_async(ir, ep_in, NULL, maxp, PHILUSB_INBOUND); ++ request_packet_async(ir, ep_out, pin_init1, sizeof(pin_init1), ++ PHILUSB_OUTBOUND); ++ request_packet_async(ir, ep_in, NULL, maxp, PHILUSB_INBOUND); ++ request_packet_async(ir, ep_out, pin_init2, sizeof(pin_init2), ++ PHILUSB_OUTBOUND); ++ request_packet_async(ir, ep_in, NULL, maxp, PHILUSB_INBOUND); ++ request_packet_async(ir, ep_out, pin_init3, sizeof(pin_init3), ++ PHILUSB_OUTBOUND); ++ /* if we dont issue the correct number of receives ++ * (PHILUSB_INBOUND) for each outbound, then the first few ir ++ * pulses will be interpreted by the usb_async_callback routine ++ * - we should ensure we have the right amount OR less - as the ++ * usb_remote_recv routine will handle the control packets OK - ++ * they start with 0x9f - but the async callback doesnt handle ++ * ir pulse packets ++ */ ++ request_packet_async(ir, ep_in, NULL, maxp, 0); ++ } else { ++ request_packet_async(ir, ep_in, NULL, maxp, PHILUSB_INBOUND); ++ request_packet_async(ir, ep_in, NULL, maxp, PHILUSB_INBOUND); ++ request_packet_async(ir, ep_out, init1, ++ sizeof(init1), PHILUSB_OUTBOUND); ++ request_packet_async(ir, ep_in, NULL, maxp, PHILUSB_INBOUND); ++ request_packet_async(ir, ep_out, init2, ++ sizeof(init2), PHILUSB_OUTBOUND); ++ request_packet_async(ir, ep_in, NULL, maxp, 0); ++ } ++ ++ usb_set_intfdata(intf, ir); ++ ++ return SUCCESS; ++} ++ ++ ++static void usb_remote_disconnect(struct usb_interface *intf) ++{ ++ struct usb_device *dev = interface_to_usbdev(intf); ++ struct irctl *ir = usb_get_intfdata(intf); ++ ++ usb_set_intfdata(intf, NULL); ++ ++ if (!ir || !ir->p) ++ return; ++ ++ ir->usbdev = NULL; ++ wake_up_all(&ir->wait_out); ++ ++ IRLOCK; ++ usb_kill_urb(ir->urb_in); ++ usb_free_urb(ir->urb_in); ++ usb_buffer_free(dev, ir->len_in, ir->buf_in, ir->dma_in); ++ IRUNLOCK; ++ ++ unregister_from_lirc(ir); ++} ++ ++static int 
usb_remote_suspend(struct usb_interface *intf, pm_message_t message) ++{ ++ struct irctl *ir = usb_get_intfdata(intf); ++ printk(DRIVER_NAME "[%d]: suspend\n", ir->devnum); ++ usb_kill_urb(ir->urb_in); ++ return 0; ++} ++ ++static int usb_remote_resume(struct usb_interface *intf) ++{ ++ struct irctl *ir = usb_get_intfdata(intf); ++ printk(DRIVER_NAME "[%d]: resume\n", ir->devnum); ++ if (usb_submit_urb(ir->urb_in, GFP_ATOMIC)) ++ return -EIO; ++ return 0; ++} ++ ++static struct usb_driver usb_remote_driver = { ++ .name = DRIVER_NAME, ++ .probe = usb_remote_probe, ++ .disconnect = usb_remote_disconnect, ++ .suspend = usb_remote_suspend, ++ .resume = usb_remote_resume, ++ .id_table = usb_remote_table ++}; ++ ++#ifdef MODULE ++static int __init usb_remote_init(void) ++{ ++ int i; ++ ++ printk(KERN_INFO "\n"); ++ printk(KERN_INFO DRIVER_NAME ": " DRIVER_DESC " " DRIVER_VERSION "\n"); ++ printk(KERN_INFO DRIVER_NAME ": " DRIVER_AUTHOR "\n"); ++ dprintk(DRIVER_NAME ": debug mode enabled\n"); ++ ++ request_module("lirc_dev"); ++ ++ i = usb_register(&usb_remote_driver); ++ if (i < 0) { ++ printk(DRIVER_NAME ": usb register failed, result = %d\n", i); ++ return -ENODEV; ++ } ++ ++ return SUCCESS; ++} ++ ++static void __exit usb_remote_exit(void) ++{ ++ usb_deregister(&usb_remote_driver); ++} ++ ++module_init(usb_remote_init); ++module_exit(usb_remote_exit); ++ ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_LICENSE("GPL"); ++MODULE_DEVICE_TABLE(usb, usb_remote_table); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Debug enabled or not"); ++#endif /* MODULE */ +diff --git a/drivers/input/lirc/lirc_parallel.c b/drivers/input/lirc/lirc_parallel.c +new file mode 100644 +index 0000000..912cad2 +--- /dev/null ++++ b/drivers/input/lirc/lirc_parallel.c +@@ -0,0 +1,728 @@ ++/**************************************************************************** ++ ** lirc_parallel.c ********************************************************* ++ 
**************************************************************************** ++ * ++ * lirc_parallel - device driver for infra-red signal receiving and ++ * transmitting unit built by the author ++ * ++ * Copyright (C) 1998 Christoph Bartelmus ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++/*********************************************************************** ++ ************************* Includes *********************** ++ ***********************************************************************/ ++ ++#include ++ ++#include ++ ++#ifdef CONFIG_SMP ++#error "--- Sorry, this driver is not SMP safe. 
---" ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++#include "lirc_parallel.h" ++ ++#define LIRC_DRIVER_NAME "lirc_parallel" ++ ++#ifndef LIRC_IRQ ++#define LIRC_IRQ 7 ++#endif ++#ifndef LIRC_PORT ++#define LIRC_PORT 0x378 ++#endif ++#ifndef LIRC_TIMER ++#define LIRC_TIMER 65536 ++#endif ++ ++/*********************************************************************** ++ ************************* Globale Variablen *********************** ++ ***********************************************************************/ ++ ++static int debug; ++static int check_pselecd; ++ ++unsigned int irq = LIRC_IRQ; ++unsigned int io = LIRC_PORT; ++#ifdef LIRC_TIMER ++unsigned int timer; ++unsigned int default_timer = LIRC_TIMER; ++#endif ++ ++#define WBUF_SIZE (256) ++#define RBUF_SIZE (256) /* this must be a power of 2 larger than 1 */ ++ ++static int wbuf[WBUF_SIZE]; ++static int rbuf[RBUF_SIZE]; ++ ++DECLARE_WAIT_QUEUE_HEAD(lirc_wait); ++ ++unsigned int rptr; ++unsigned int wptr; ++unsigned int lost_irqs; ++int is_open; ++ ++struct parport *pport; ++struct pardevice *ppdevice; ++int is_claimed; ++ ++unsigned int tx_mask = 1; ++ ++/*********************************************************************** ++ ************************* Interne Funktionen *********************** ++ ***********************************************************************/ ++ ++static inline unsigned int in(int offset) ++{ ++ switch (offset) { ++ case LIRC_LP_BASE: ++ return parport_read_data(pport); ++ case LIRC_LP_STATUS: ++ return parport_read_status(pport); ++ case LIRC_LP_CONTROL: ++ return parport_read_control(pport); ++ } ++ return 0; /* make compiler happy */ ++} ++ ++static inline void out(int offset, int value) ++{ ++ switch (offset) { ++ case LIRC_LP_BASE: ++ parport_write_data(pport, 
value); ++ break; ++ case LIRC_LP_CONTROL: ++ parport_write_control(pport, value); ++ break; ++ case LIRC_LP_STATUS: ++ printk(KERN_INFO "%s: attempt to write to status register\n", ++ LIRC_DRIVER_NAME); ++ break; ++ } ++} ++ ++static inline unsigned int lirc_get_timer(void) ++{ ++ return in(LIRC_PORT_TIMER) & LIRC_PORT_TIMER_BIT; ++} ++ ++static inline unsigned int lirc_get_signal(void) ++{ ++ return in(LIRC_PORT_SIGNAL) & LIRC_PORT_SIGNAL_BIT; ++} ++ ++static inline void lirc_on(void) ++{ ++ out(LIRC_PORT_DATA, tx_mask); ++} ++ ++static inline void lirc_off(void) ++{ ++ out(LIRC_PORT_DATA, 0); ++} ++ ++static unsigned int init_lirc_timer(void) ++{ ++ struct timeval tv, now; ++ unsigned int level, newlevel, timeelapsed, newtimer; ++ int count = 0; ++ ++ do_gettimeofday(&tv); ++ tv.tv_sec++; /* wait max. 1 sec. */ ++ level = lirc_get_timer(); ++ do { ++ newlevel = lirc_get_timer(); ++ if (level == 0 && newlevel != 0) ++ count++; ++ level = newlevel; ++ do_gettimeofday(&now); ++ } while (count < 1000 && (now.tv_sec < tv.tv_sec ++ || (now.tv_sec == tv.tv_sec ++ && now.tv_usec < tv.tv_usec))); ++ ++ timeelapsed = ((now.tv_sec + 1 - tv.tv_sec)*1000000 ++ + (now.tv_usec - tv.tv_usec)); ++ if (count >= 1000 && timeelapsed > 0) { ++ if (default_timer == 0) { ++ /* autodetect timer */ ++ newtimer = (1000000*count)/timeelapsed; ++ printk(KERN_INFO "%s: %u Hz timer detected\n", ++ LIRC_DRIVER_NAME, newtimer); ++ return newtimer; ++ } else { ++ newtimer = (1000000*count)/timeelapsed; ++ if (abs(newtimer - default_timer) > default_timer/10) { ++ /* bad timer */ ++ printk(KERN_NOTICE "%s: bad timer: %u Hz\n", ++ LIRC_DRIVER_NAME, newtimer); ++ printk(KERN_NOTICE "%s: using default timer: " ++ "%u Hz\n", ++ LIRC_DRIVER_NAME, default_timer); ++ return default_timer; ++ } else { ++ printk(KERN_INFO "%s: %u Hz timer detected\n", ++ LIRC_DRIVER_NAME, newtimer); ++ return newtimer; /* use detected value */ ++ } ++ } ++ } else { ++ printk(KERN_NOTICE "%s: no timer detected\n", 
LIRC_DRIVER_NAME); ++ return 0; ++ } ++} ++ ++static int lirc_claim(void) ++{ ++ if (parport_claim(ppdevice) != 0) { ++ printk(KERN_WARNING "%s: could not claim port\n", ++ LIRC_DRIVER_NAME); ++ printk(KERN_WARNING "%s: waiting for port becoming available" ++ "\n", LIRC_DRIVER_NAME); ++ if (parport_claim_or_block(ppdevice) < 0) { ++ printk(KERN_NOTICE "%s: could not claim port, giving" ++ " up\n", LIRC_DRIVER_NAME); ++ return 0; ++ } ++ } ++ out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP); ++ is_claimed = 1; ++ return 1; ++} ++ ++/*********************************************************************** ++ ************************* interrupt handler ************************ ++ ***********************************************************************/ ++ ++static inline void rbuf_write(int signal) ++{ ++ unsigned int nwptr; ++ ++ nwptr = (wptr + 1) & (RBUF_SIZE - 1); ++ if (nwptr == rptr) { ++ /* no new signals will be accepted */ ++ lost_irqs++; ++ printk(KERN_NOTICE "%s: buffer overrun\n", LIRC_DRIVER_NAME); ++ return; ++ } ++ rbuf[wptr] = signal; ++ wptr = nwptr; ++} ++ ++static void irq_handler(void *blah) ++{ ++ struct timeval tv; ++ static struct timeval lasttv; ++ static int init; ++ long signal; ++ int data; ++ unsigned int level, newlevel; ++ unsigned int timeout; ++ ++ if (!module_refcount(THIS_MODULE)) ++ return; ++ ++ if (!is_claimed) ++ return; ++ ++ /* disable interrupt */ ++ /* ++ disable_irq(irq); ++ out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ) & (~LP_PINTEN)); ++ */ ++ if (check_pselecd && (in(1) & LP_PSELECD)) ++ return; ++ ++#ifdef LIRC_TIMER ++ if (init) { ++ do_gettimeofday(&tv); ++ ++ signal = tv.tv_sec - lasttv.tv_sec; ++ if (signal > 15) ++ /* really long time */ ++ data = PULSE_MASK; ++ else ++ data = (int) (signal*1000000 + ++ tv.tv_usec - lasttv.tv_usec + ++ LIRC_SFH506_DELAY); ++ ++ rbuf_write(data); /* space */ ++ } else { ++ if (timer == 0) { ++ /* wake up; we'll lose this signal ++ * but it will be garbage if the device ++ * is turned on anyway */ ++ 
timer = init_lirc_timer(); ++ /* enable_irq(irq); */ ++ return; ++ } ++ init = 1; ++ } ++ ++ timeout = timer/10; /* timeout after 1/10 sec. */ ++ signal = 1; ++ level = lirc_get_timer(); ++ do { ++ newlevel = lirc_get_timer(); ++ if (level == 0 && newlevel != 0) ++ signal++; ++ level = newlevel; ++ ++ /* giving up */ ++ if (signal > timeout ++ || (check_pselecd && (in(1) & LP_PSELECD))) { ++ signal = 0; ++ printk(KERN_NOTICE "%s: timeout\n", LIRC_DRIVER_NAME); ++ break; ++ } ++ } while (lirc_get_signal()); ++ ++ if (signal != 0) { ++ /* ajust value to usecs */ ++ unsigned long long helper; ++ ++ helper = ((unsigned long long) signal)*1000000; ++ do_div(helper, timer); ++ signal = (long) helper; ++ ++ if (signal > LIRC_SFH506_DELAY) ++ data = signal - LIRC_SFH506_DELAY; ++ else ++ data = 1; ++ rbuf_write(PULSE_BIT|data); /* pulse */ ++ } ++ do_gettimeofday(&lasttv); ++#else ++ /* add your code here */ ++#endif ++ ++ wake_up_interruptible(&lirc_wait); ++ ++ /* enable interrupt */ ++ /* ++ enable_irq(irq); ++ out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ)|LP_PINTEN); ++ */ ++} ++ ++/*********************************************************************** ++ ************************** file_operations ************************ ++ ***********************************************************************/ ++ ++static loff_t lirc_lseek(struct file *filep, loff_t offset, int orig) ++{ ++ return -ESPIPE; ++} ++ ++static ssize_t lirc_read(struct file *filep, char *buf, size_t n, loff_t *ppos) ++{ ++ int result = 0; ++ int count = 0; ++ DECLARE_WAITQUEUE(wait, current); ++ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ ++ add_wait_queue(&lirc_wait, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (count < n) { ++ if (rptr != wptr) { ++ if (copy_to_user(buf+count, (char *) &rbuf[rptr], ++ sizeof(int))) { ++ result = -EFAULT; ++ break; ++ } ++ rptr = (rptr + 1) & (RBUF_SIZE - 1); ++ count += sizeof(int); ++ } else { ++ if (filep->f_flags & O_NONBLOCK) { ++ result = -EAGAIN; ++ break; 
++ } ++ if (signal_pending(current)) { ++ result = -ERESTARTSYS; ++ break; ++ } ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ } ++ remove_wait_queue(&lirc_wait, &wait); ++ set_current_state(TASK_RUNNING); ++ return count ? count : result; ++} ++ ++static ssize_t lirc_write(struct file *filep, const char *buf, size_t n, ++ loff_t *ppos) ++{ ++ int count; ++ unsigned int i; ++ unsigned int level, newlevel; ++ unsigned long flags; ++ int counttimer; ++ ++ if (!is_claimed) ++ return -EBUSY; ++ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ ++ count = n / sizeof(int); ++ ++ if (count > WBUF_SIZE || count % 2 == 0) ++ return -EINVAL; ++ ++ if (copy_from_user(wbuf, buf, n)) ++ return -EFAULT; ++ ++#ifdef LIRC_TIMER ++ if (timer == 0) { ++ /* try again if device is ready */ ++ timer = init_lirc_timer(); ++ if (timer == 0) ++ return -EIO; ++ } ++ ++ /* ajust values from usecs */ ++ for (i = 0; i < count; i++) { ++ unsigned long long helper; ++ ++ helper = ((unsigned long long) wbuf[i])*timer; ++ do_div(helper, 1000000); ++ wbuf[i] = (int) helper; ++ } ++ ++ local_irq_save(flags); ++ i = 0; ++ while (i < count) { ++ level = lirc_get_timer(); ++ counttimer = 0; ++ lirc_on(); ++ do { ++ newlevel = lirc_get_timer(); ++ if (level == 0 && newlevel != 0) ++ counttimer++; ++ level = newlevel; ++ if (check_pselecd && (in(1) & LP_PSELECD)) { ++ lirc_off(); ++ local_irq_restore(flags); ++ return -EIO; ++ } ++ } while (counttimer < wbuf[i]); ++ i++; ++ ++ lirc_off(); ++ if (i == count) ++ break; ++ counttimer = 0; ++ do { ++ newlevel = lirc_get_timer(); ++ if (level == 0 && newlevel != 0) ++ counttimer++; ++ level = newlevel; ++ if (check_pselecd && (in(1) & LP_PSELECD)) { ++ local_irq_restore(flags); ++ return -EIO; ++ } ++ } while (counttimer < wbuf[i]); ++ i++; ++ } ++ local_irq_restore(flags); ++#else ++ /* place code that handles write ++ * without external timer here */ ++#endif ++ return n; ++} ++ ++static unsigned int lirc_poll(struct file *file, poll_table 
*wait) ++{ ++ poll_wait(file, &lirc_wait, wait); ++ if (rptr != wptr) ++ return POLLIN | POLLRDNORM; ++ return 0; ++} ++ ++static int lirc_ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg) ++{ ++ int result; ++ unsigned long features = LIRC_CAN_SET_TRANSMITTER_MASK | ++ LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2; ++ unsigned long mode; ++ unsigned int ivalue; ++ ++ switch (cmd) { ++ case LIRC_GET_FEATURES: ++ result = put_user(features, (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ case LIRC_GET_SEND_MODE: ++ result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ case LIRC_GET_REC_MODE: ++ result = put_user(LIRC_MODE_MODE2, (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ case LIRC_SET_SEND_MODE: ++ result = get_user(mode, (unsigned long *) arg); ++ if (result) ++ return result; ++ if (mode != LIRC_MODE_PULSE) ++ return -EINVAL; ++ break; ++ case LIRC_SET_REC_MODE: ++ result = get_user(mode, (unsigned long *) arg); ++ if (result) ++ return result; ++ if (mode != LIRC_MODE_MODE2) ++ return -ENOSYS; ++ break; ++ case LIRC_SET_TRANSMITTER_MASK: ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ if ((ivalue & LIRC_PARALLEL_TRANSMITTER_MASK) != ivalue) ++ return LIRC_PARALLEL_MAX_TRANSMITTERS; ++ tx_mask = ivalue; ++ break; ++ default: ++ return -ENOIOCTLCMD; ++ } ++ return 0; ++} ++ ++static int lirc_open(struct inode *node, struct file *filep) ++{ ++ if (module_refcount(THIS_MODULE) || !lirc_claim()) ++ return -EBUSY; ++ ++ parport_enable_irq(pport); ++ ++ /* init read ptr */ ++ rptr = 0; ++ wptr = 0; ++ lost_irqs = 0; ++ ++ is_open = 1; ++ return 0; ++} ++ ++static int lirc_close(struct inode *node, struct file *filep) ++{ ++ if (is_claimed) { ++ is_claimed = 0; ++ parport_release(ppdevice); ++ } ++ is_open = 0; ++ return 0; ++} ++ ++static struct file_operations lirc_fops = { ++ .llseek = lirc_lseek, ++ 
.read = lirc_read, ++ .write = lirc_write, ++ .poll = lirc_poll, ++ .ioctl = lirc_ioctl, ++ .open = lirc_open, ++ .release = lirc_close ++}; ++ ++static int set_use_inc(void *data) ++{ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++} ++ ++static struct lirc_plugin plugin = { ++ .name = LIRC_DRIVER_NAME, ++ .minor = -1, ++ .code_length = 1, ++ .sample_rate = 0, ++ .data = NULL, ++ .add_to_buf = NULL, ++ .get_queue = NULL, ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .fops = &lirc_fops, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++#ifdef MODULE ++ ++static int pf(void *handle); ++static void kf(void *handle); ++ ++static struct timer_list poll_timer; ++static void poll_state(unsigned long ignored); ++ ++static void poll_state(unsigned long ignored) ++{ ++ printk(KERN_NOTICE "%s: time\n", ++ LIRC_DRIVER_NAME); ++ del_timer(&poll_timer); ++ if (is_claimed) ++ return; ++ kf(NULL); ++ if (!is_claimed) { ++ printk(KERN_NOTICE "%s: could not claim port, giving up\n", ++ LIRC_DRIVER_NAME); ++ init_timer(&poll_timer); ++ poll_timer.expires = jiffies + HZ; ++ poll_timer.data = (unsigned long)current; ++ poll_timer.function = poll_state; ++ add_timer(&poll_timer); ++ } ++} ++ ++static int pf(void *handle) ++{ ++ parport_disable_irq(pport); ++ is_claimed = 0; ++ return 0; ++} ++ ++static void kf(void *handle) ++{ ++ if (!is_open) ++ return; ++ if (!lirc_claim()) ++ return; ++ parport_enable_irq(pport); ++ lirc_off(); ++ /* this is a bit annoying when you actually print...*/ ++ /* ++ printk(KERN_INFO "%s: reclaimed port\n", LIRC_DRIVER_NAME); ++ */ ++} ++ ++/*********************************************************************** ++ ****************** init_module()/cleanup_module() ****************** ++ ***********************************************************************/ ++ ++int init_module(void) ++{ ++ pport = parport_find_base(io); ++ if (pport == NULL) { ++ printk(KERN_NOTICE "%s: no port at %x found\n", ++ LIRC_DRIVER_NAME, io); 
++ return -ENXIO; ++ } ++ ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME, ++ pf, kf, irq_handler, 0, NULL); ++ parport_put_port(pport); ++ if (ppdevice == NULL) { ++ printk(KERN_NOTICE "%s: parport_register_device() failed\n", ++ LIRC_DRIVER_NAME); ++ return -ENXIO; ++ } ++ if (parport_claim(ppdevice) != 0) ++ goto skip_init; ++ is_claimed = 1; ++ out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP); ++ ++#ifdef LIRC_TIMER ++ if (debug) ++ out(LIRC_PORT_DATA, tx_mask); ++ ++ timer = init_lirc_timer(); ++ ++#if 0 /* continue even if device is offline */ ++ if (timer == 0) { ++ is_claimed = 0; ++ parport_release(pport); ++ parport_unregister_device(ppdevice); ++ return -EIO; ++ } ++ ++#endif ++ if (debug) ++ out(LIRC_PORT_DATA, 0); ++#endif ++ ++ is_claimed = 0; ++ parport_release(ppdevice); ++ skip_init: ++ plugin.minor = lirc_register_plugin(&plugin); ++ if (plugin.minor < 0) { ++ printk(KERN_NOTICE "%s: register_chrdev() failed\n", ++ LIRC_DRIVER_NAME); ++ parport_unregister_device(ppdevice); ++ return -EIO; ++ } ++ printk(KERN_INFO "%s: installed using port 0x%04x irq %d\n", ++ LIRC_DRIVER_NAME, io, irq); ++ return 0; ++} ++ ++void cleanup_module(void) ++{ ++ parport_unregister_device(ppdevice); ++ lirc_unregister_plugin(plugin.minor); ++} ++ ++MODULE_DESCRIPTION("Infrared receiver driver for parallel ports."); ++MODULE_AUTHOR("Christoph Bartelmus"); ++MODULE_LICENSE("GPL"); ++ ++module_param(io, int, 0444); ++MODULE_PARM_DESC(io, "I/O address base (0x3bc, 0x378 or 0x278)"); ++ ++module_param(irq, int, 0444); ++MODULE_PARM_DESC(irq, "Interrupt (7 or 5)"); ++ ++module_param(tx_mask, int, 0444); ++MODULE_PARM_DESC(tx_mask, "Transmitter mask (default: 0x01)"); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++module_param(check_pselecd, bool, 0644); ++MODULE_PARM_DESC(check_pselecd, "Check for printer (default: 0)"); ++ ++#endif /* MODULE */ +diff --git a/drivers/input/lirc/lirc_parallel.h 
b/drivers/input/lirc/lirc_parallel.h +new file mode 100644 +index 0000000..4bed6af +--- /dev/null ++++ b/drivers/input/lirc/lirc_parallel.h +@@ -0,0 +1,26 @@ ++/* lirc_parallel.h */ ++ ++#ifndef _LIRC_PARALLEL_H ++#define _LIRC_PARALLEL_H ++ ++#include ++ ++#define LIRC_PORT_LEN 3 ++ ++#define LIRC_LP_BASE 0 ++#define LIRC_LP_STATUS 1 ++#define LIRC_LP_CONTROL 2 ++ ++#define LIRC_PORT_DATA LIRC_LP_BASE /* base */ ++#define LIRC_PORT_TIMER LIRC_LP_STATUS /* status port */ ++#define LIRC_PORT_TIMER_BIT LP_PBUSY /* busy signal */ ++#define LIRC_PORT_SIGNAL LIRC_LP_STATUS /* status port */ ++#define LIRC_PORT_SIGNAL_BIT LP_PACK /* ack signal */ ++#define LIRC_PORT_IRQ LIRC_LP_CONTROL /* control port */ ++ ++#define LIRC_SFH506_DELAY 0 /* delay t_phl in usecs */ ++ ++#define LIRC_PARALLEL_MAX_TRANSMITTERS 8 ++#define LIRC_PARALLEL_TRANSMITTER_MASK ((1< ++ * Tim Davies ++ * ++ * This driver was derived from: ++ * Venky Raju ++ * "lirc_imon - "LIRC plugin/VFD driver for Ahanix/Soundgraph IMON IR/VFD" ++ * Paul Miller 's 2003-2004 ++ * "lirc_atiusb - USB remote support for LIRC" ++ * Culver Consulting Services 's 2003 ++ * "Sasem OnAir VFD/IR USB driver" ++ * ++ * ++ * 2004/06/13 - 0.1 ++ * initial version ++ * ++ * 2004/06/28 - 0.2 ++ * added file system support to write data to VFD device (used ++ * in conjunction with LCDProc) ++ * ++ * 2004/11/22 - 0.3 ++ * Ported to 2.6 kernel ++ * - Tim Davies ++ * ++ * 2005/03/29 - 0.4 ++ * A few tidyups and keypress timings ++ * - Tim Davies ++ * ++ * 2005/06/23 - 0.5 ++ * A complete rewrite (shamelessly) based on lirc_imon.c ++ * Tim Davies ++ * ++ * NOTE - The LCDproc iMon driver should work with this module. More info at ++ * http://www.frogstorm.info/sasem ++ */ ++ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#include ++ ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++ ++#define MOD_AUTHOR "Oliver Stabel , " \ ++ "Tim Davies " ++#define MOD_DESC "USB Driver for Sasem Remote Controller V1.1" ++#define MOD_NAME "lirc_sasem" ++#define MOD_VERSION "0.5" ++ ++#define VFD_MINOR_BASE 144 /* Same as LCD */ ++#define DEVICE_NAME "lcd%d" ++ ++#define BUF_CHUNK_SIZE 8 ++#define BUF_SIZE 128 ++ ++#define SUCCESS 0 ++#define TRUE 1 ++#define FALSE 0 ++ ++#define IOCTL_LCD_CONTRAST 1 ++ ++/* ------------------------------------------------------------ ++ * P R O T O T Y P E S ++ * ------------------------------------------------------------ ++ */ ++ ++/* USB Callback prototypes */ ++static int sasem_probe(struct usb_interface *interface, ++ const struct usb_device_id *id); ++static void sasem_disconnect(struct usb_interface *interface); ++static void usb_rx_callback(struct urb *urb); ++static void usb_tx_callback(struct urb *urb); ++ ++/* VFD file_operations function prototypes */ ++static int vfd_open(struct inode *inode, struct file *file); ++static int vfd_ioctl(struct inode *inode, struct file *file, ++ unsigned cmd, unsigned long arg); ++static int vfd_close(struct inode *inode, struct file *file); ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos); ++ ++/* LIRC plugin function prototypes */ ++static int ir_open(void *data); ++static void ir_close(void *data); 
++ ++/* Driver init/exit prototypes */ ++static int __init sasem_init(void); ++static void __exit sasem_exit(void); ++ ++/* ------------------------------------------------------------ ++ * G L O B A L S ++ * ------------------------------------------------------------ ++ */ ++ ++struct sasem_context { ++ ++ struct usb_device *dev; ++ int vfd_isopen; /* VFD port has been opened */ ++ unsigned int vfd_contrast; /* VFD contrast */ ++ int ir_isopen; /* IR port has been opened */ ++ int dev_present; /* USB device presence */ ++ struct mutex lock; /* to lock this object */ ++ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ ++ ++ struct lirc_plugin *plugin; ++ struct usb_endpoint_descriptor *rx_endpoint; ++ struct usb_endpoint_descriptor *tx_endpoint; ++ struct urb *rx_urb; ++ struct urb *tx_urb; ++ unsigned char usb_rx_buf[8]; ++ unsigned char usb_tx_buf[8]; ++ ++ struct tx_t { ++ unsigned char data_buf[32]; /* user data buffer */ ++ struct completion finished; /* wait for write to finish */ ++ atomic_t busy; /* write in progress */ ++ int status; /* status of tx completion */ ++ } tx; ++ ++ /* for dealing with repeat codes (wish there was a toggle bit!) 
*/ ++ struct timeval presstime; ++ char lastcode[8]; ++ int codesaved; ++}; ++ ++#define LOCK_CONTEXT mutex_lock(&context->lock) ++#define UNLOCK_CONTEXT mutex_unlock(&context->lock) ++ ++/* VFD file operations */ ++static struct file_operations vfd_fops = { ++ ++ .owner = THIS_MODULE, ++ .open = &vfd_open, ++ .write = &vfd_write, ++ .ioctl = &vfd_ioctl, ++ .release = &vfd_close ++}; ++ ++/* USB Device ID for Sasem USB Control Board */ ++static struct usb_device_id sasem_usb_id_table[] = { ++ /* Sasem USB Control Board */ ++ { USB_DEVICE(0x11ba, 0x0101) }, ++ /* Terminiating entry */ ++ {} ++}; ++ ++/* USB Device data */ ++static struct usb_driver sasem_driver = { ++ .name = MOD_NAME, ++ .probe = sasem_probe, ++ .disconnect = sasem_disconnect, ++ .id_table = sasem_usb_id_table, ++}; ++ ++static struct usb_class_driver sasem_class = { ++ .name = DEVICE_NAME, ++ .fops = &vfd_fops, ++ .minor_base = VFD_MINOR_BASE, ++}; ++ ++/* to prevent races between open() and disconnect() */ ++static DECLARE_MUTEX(disconnect_sem); ++ ++static int debug; ++ ++ ++/* ------------------------------------------------------------ ++ * M O D U L E C O D E ++ * ------------------------------------------------------------ ++ */ ++ ++MODULE_AUTHOR(MOD_AUTHOR); ++MODULE_DESCRIPTION(MOD_DESC); ++MODULE_LICENSE("GPL"); ++module_param(debug, int, 0); ++MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)"); ++ ++static inline void delete_context(struct sasem_context *context) ++{ ++ usb_free_urb(context->tx_urb); /* VFD */ ++ usb_free_urb(context->rx_urb); /* IR */ ++ lirc_buffer_free(context->plugin->rbuf); ++ kfree(context->plugin->rbuf); ++ kfree(context->plugin); ++ kfree(context); ++ ++ if (debug) ++ info("%s: context deleted", __func__); ++} ++ ++static inline void deregister_from_lirc(struct sasem_context *context) ++{ ++ int retval; ++ int minor = context->plugin->minor; ++ ++ retval = lirc_unregister_plugin(minor); ++ if (retval) ++ err("%s: unable to deregister from lirc 
(%d)", ++ __func__, retval); ++ else ++ info("Deregistered Sasem plugin (minor:%d)", minor); ++ ++} ++ ++/** ++ * Called when the VFD device (e.g. /dev/usb/lcd) ++ * is opened by the application. ++ */ ++static int vfd_open(struct inode *inode, struct file *file) ++{ ++ struct usb_interface *interface; ++ struct sasem_context *context = NULL; ++ int subminor; ++ int retval = SUCCESS; ++ ++ /* prevent races with disconnect */ ++ down(&disconnect_sem); ++ ++ subminor = iminor(inode); ++ interface = usb_find_interface(&sasem_driver, subminor); ++ if (!interface) { ++ err("%s: could not find interface for minor %d", ++ __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ context = usb_get_intfdata(interface); ++ ++ if (!context) { ++ err("%s: no context found for minor %d", ++ __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ LOCK_CONTEXT; ++ ++ if (context->vfd_isopen) { ++ err("%s: VFD port is already open", __func__); ++ retval = -EBUSY; ++ } else { ++ context->vfd_isopen = TRUE; ++ file->private_data = context; ++ info("VFD port opened"); ++ } ++ ++ UNLOCK_CONTEXT; ++ ++exit: ++ up(&disconnect_sem); ++ return retval; ++} ++ ++/** ++ * Called when the VFD device (e.g. /dev/usb/lcd) ++ * is closed by the application. ++ */ ++static int vfd_ioctl(struct inode *inode, struct file *file, ++ unsigned cmd, unsigned long arg) ++{ ++ struct sasem_context *context = NULL; ++ ++ context = (struct sasem_context *) file->private_data; ++ ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ LOCK_CONTEXT; ++ ++ switch (cmd) { ++ case IOCTL_LCD_CONTRAST: ++ if (arg > 1000) ++ arg = 1000; ++ if (arg < 0) ++ arg = 0; ++ context->vfd_contrast = (unsigned int)arg; ++ break; ++ default: ++ info("Unknown IOCTL command"); ++ UNLOCK_CONTEXT; ++ return -ENOIOCTLCMD; /* not supported */ ++ } ++ ++ UNLOCK_CONTEXT; ++ return 0; ++} ++ ++/** ++ * Called when the VFD device (e.g. 
/dev/usb/lcd) ++ * is closed by the application. ++ */ ++static int vfd_close(struct inode *inode, struct file *file) ++{ ++ struct sasem_context *context = NULL; ++ int retval = SUCCESS; ++ ++ context = (struct sasem_context *) file->private_data; ++ ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ LOCK_CONTEXT; ++ ++ if (!context->vfd_isopen) { ++ err("%s: VFD is not open", __func__); ++ retval = -EIO; ++ } else { ++ context->vfd_isopen = FALSE; ++ info("VFD port closed"); ++ if (!context->dev_present && !context->ir_isopen) { ++ ++ /* Device disconnected before close and IR port is ++ * not open. If IR port is open, context will be ++ * deleted by ir_close. */ ++ UNLOCK_CONTEXT; ++ delete_context(context); ++ return retval; ++ } ++ } ++ ++ UNLOCK_CONTEXT; ++ return retval; ++} ++ ++/** ++ * Sends a packet to the VFD. ++ */ ++static inline int send_packet(struct sasem_context *context) ++{ ++ unsigned int pipe; ++ int interval = 0; ++ int retval = SUCCESS; ++ ++ pipe = usb_sndintpipe(context->dev, ++ context->tx_endpoint->bEndpointAddress); ++ interval = context->tx_endpoint->bInterval; ++ ++ usb_fill_int_urb(context->tx_urb, context->dev, pipe, ++ context->usb_tx_buf, sizeof(context->usb_tx_buf), ++ usb_tx_callback, context, interval); ++ ++ context->tx_urb->actual_length = 0; ++ ++ init_completion(&context->tx.finished); ++ atomic_set(&(context->tx.busy), 1); ++ ++ retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); ++ if (retval != SUCCESS) { ++ atomic_set(&(context->tx.busy), 0); ++ err("%s: error submitting urb (%d)", __func__, retval); ++ } else { ++ /* Wait for tranmission to complete (or abort) */ ++ UNLOCK_CONTEXT; ++ wait_for_completion(&context->tx.finished); ++ LOCK_CONTEXT; ++ ++ retval = context->tx.status; ++ if (retval != SUCCESS) ++ err("%s: packet tx failed (%d)", __func__, retval); ++ } ++ ++ return retval; ++} ++ ++/** ++ * Writes data to the VFD. 
The Sasem VFD is 2x16 characters ++ * and requires data in 9 consecutive USB interrupt packets, ++ * each packet carrying 8 bytes. ++ */ ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos) ++{ ++ int i; ++ int retval = SUCCESS; ++ struct sasem_context *context; ++ ++ context = (struct sasem_context *) file->private_data; ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ LOCK_CONTEXT; ++ ++ if (!context->dev_present) { ++ err("%s: no Sasem device present", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ if (n_bytes <= 0 || n_bytes > 32) { ++ err("%s: invalid payload size", __func__); ++ retval = -EINVAL; ++ goto exit; ++ } ++ ++ retval = copy_from_user(context->tx.data_buf, buf, n_bytes); ++ if (retval < 0) ++ goto exit; ++ ++ /* Pad with spaces */ ++ for (i = n_bytes; i < 32; ++i) ++ context->tx.data_buf[i] = ' '; ++ ++ /* Nine 8 byte packets to be sent */ ++ /* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0" ++ * will clear the VFD */ ++ for (i = 0; i < 9; i++) { ++ switch (i) { ++ case 0: ++ memcpy(context->usb_tx_buf, "\x07\0\0\0\0\0\0\0", 8); ++ context->usb_tx_buf[1] = (context->vfd_contrast) ? 
++ (0x2B - (context->vfd_contrast - 1) / 250) ++ : 0x2B; ++ break; ++ case 1: ++ memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8); ++ break; ++ case 2: ++ memcpy(context->usb_tx_buf, "\x0b\x01\0\0\0\0\0\0", 8); ++ break; ++ case 3: ++ memcpy(context->usb_tx_buf, context->tx.data_buf, 8); ++ break; ++ case 4: ++ memcpy(context->usb_tx_buf, ++ context->tx.data_buf + 8, 8); ++ break; ++ case 5: ++ memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8); ++ break; ++ case 6: ++ memcpy(context->usb_tx_buf, "\x0b\x02\0\0\0\0\0\0", 8); ++ break; ++ case 7: ++ memcpy(context->usb_tx_buf, ++ context->tx.data_buf + 16, 8); ++ break; ++ case 8: ++ memcpy(context->usb_tx_buf, ++ context->tx.data_buf + 24, 8); ++ break; ++ } ++ retval = send_packet(context); ++ if (retval != SUCCESS) { ++ ++ err("%s: send packet failed for packet #%d", ++ __func__, i); ++ goto exit; ++ } ++ } ++exit: ++ ++ UNLOCK_CONTEXT; ++ ++ return (retval == SUCCESS) ? n_bytes : retval; ++} ++ ++/** ++ * Callback function for USB core API: transmit data ++ */ ++static void usb_tx_callback(struct urb *urb) ++{ ++ struct sasem_context *context; ++ ++ if (!urb) ++ return; ++ context = (struct sasem_context *) urb->context; ++ if (!context) ++ return; ++ ++ context->tx.status = urb->status; ++ ++ /* notify waiters that write has finished */ ++ atomic_set(&context->tx.busy, 0); ++ complete(&context->tx.finished); ++ ++ return; ++} ++ ++/** ++ * Called by lirc_dev when the application opens /dev/lirc ++ */ ++static int ir_open(void *data) ++{ ++ int retval = SUCCESS; ++ struct sasem_context *context; ++ ++ /* prevent races with disconnect */ ++ down(&disconnect_sem); ++ ++ context = (struct sasem_context *) data; ++ ++ LOCK_CONTEXT; ++ ++ if (context->ir_isopen) { ++ err("%s: IR port is already open", __func__); ++ retval = -EBUSY; ++ goto exit; ++ } ++ ++ usb_fill_int_urb(context->rx_urb, context->dev, ++ usb_rcvintpipe(context->dev, ++ context->rx_endpoint->bEndpointAddress), ++ context->usb_rx_buf, 
sizeof(context->usb_rx_buf), ++ usb_rx_callback, context, context->rx_endpoint->bInterval); ++ ++ retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); ++ ++ if (retval) ++ err("%s: usb_submit_urb failed for ir_open (%d)", ++ __func__, retval); ++ else { ++ context->ir_isopen = TRUE; ++ info("IR port opened"); ++ } ++ ++exit: ++ UNLOCK_CONTEXT; ++ ++ up(&disconnect_sem); ++ return retval; ++} ++ ++/** ++ * Called by lirc_dev when the application closes /dev/lirc ++ */ ++static void ir_close(void *data) ++{ ++ struct sasem_context *context; ++ ++ context = (struct sasem_context *)data; ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return; ++ } ++ ++ LOCK_CONTEXT; ++ ++ usb_kill_urb(context->rx_urb); ++ context->ir_isopen = FALSE; ++ info("IR port closed"); ++ ++ if (!context->dev_present) { ++ ++ /* ++ * Device disconnected while IR port was ++ * still open. Plugin was not deregistered ++ * at disconnect time, so do it now. ++ */ ++ deregister_from_lirc(context); ++ ++ if (!context->vfd_isopen) { ++ ++ UNLOCK_CONTEXT; ++ delete_context(context); ++ return; ++ } ++ /* If VFD port is open, context will be deleted by vfd_close */ ++ } ++ ++ UNLOCK_CONTEXT; ++ return; ++} ++ ++/** ++ * Process the incoming packet ++ */ ++static inline void incoming_packet(struct sasem_context *context, ++ struct urb *urb) ++{ ++ int len = urb->actual_length; ++ unsigned char *buf = urb->transfer_buffer; ++ long ms; ++ struct timeval tv; ++ ++ if (len != 8) { ++ warn("%s: invalid incoming packet size (%d)", ++ __func__, len); ++ return; ++ } ++ ++#ifdef DEBUG ++ int i; ++ for (i = 0; i < 8; ++i) ++ printk(KERN_INFO "%02x ", buf[i]); ++ printk(KERN_INFO "\n"); ++#endif ++ ++ /* Lirc could deal with the repeat code, but we really need to block it ++ * if it arrives too late. Otherwise we could repeat the wrong code. 
*/ ++ ++ /* get the time since the last button press */ ++ do_gettimeofday(&tv); ++ ms = (tv.tv_sec - context->presstime.tv_sec) * 1000 + ++ (tv.tv_usec - context->presstime.tv_usec) / 1000; ++ ++ if (memcmp(buf, "\x08\0\0\0\0\0\0\0", 8) == 0) { ++ /* the repeat code is being sent, so we copy ++ * the old code to LIRC */ ++ ++ /* NOTE: Only if the last code was less than 250ms ago ++ * - no one should be able to push another (undetected) button ++ * in that time and then get a false repeat of the previous ++ * press but it is long enough for a genuine repeat */ ++ if ((ms < 250) && (context->codesaved != 0)) { ++ memcpy(buf, &context->lastcode, 8); ++ context->presstime.tv_sec = tv.tv_sec; ++ context->presstime.tv_usec = tv.tv_usec; ++ } ++ } else { ++ /* save the current valid code for repeats */ ++ memcpy(&context->lastcode, buf, 8); ++ /* set flag to signal a valid code was save; ++ * just for safety reasons */ ++ context->codesaved = 1; ++ context->presstime.tv_sec = tv.tv_sec; ++ context->presstime.tv_usec = tv.tv_usec; ++ } ++ ++ lirc_buffer_write_1(context->plugin->rbuf, buf); ++ wake_up(&context->plugin->rbuf->wait_poll); ++} ++ ++/** ++ * Callback function for USB core API: receive data ++ */ ++static void usb_rx_callback(struct urb *urb) ++{ ++ struct sasem_context *context; ++ ++ if (!urb) ++ return; ++ context = (struct sasem_context *) urb->context; ++ if (!context) ++ return; ++ ++ switch (urb->status) { ++ ++ case -ENOENT: /* usbcore unlink successful! 
*/ ++ return; ++ ++ case SUCCESS: ++ if (context->ir_isopen) ++ incoming_packet(context, urb); ++ break; ++ ++ default: ++ warn("%s: status (%d): ignored", ++ __func__, urb->status); ++ break; ++ } ++ ++ usb_submit_urb(context->rx_urb, GFP_ATOMIC); ++ return; ++} ++ ++ ++ ++/** ++ * Callback function for USB core API: Probe ++ */ ++static int sasem_probe(struct usb_interface *interface, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = NULL; ++ struct usb_host_interface *iface_desc = NULL; ++ struct usb_endpoint_descriptor *rx_endpoint = NULL; ++ struct usb_endpoint_descriptor *tx_endpoint = NULL; ++ struct urb *rx_urb = NULL; ++ struct urb *tx_urb = NULL; ++ struct lirc_plugin *plugin = NULL; ++ struct lirc_buffer *rbuf = NULL; ++ int lirc_minor = 0; ++ int num_endpoints; ++ int retval = SUCCESS; ++ int vfd_ep_found; ++ int ir_ep_found; ++ int alloc_status; ++ struct sasem_context *context = NULL; ++ int i; ++ ++ info("%s: found Sasem device", __func__); ++ ++ ++ dev = usb_get_dev(interface_to_usbdev(interface)); ++ iface_desc = interface->cur_altsetting; ++ num_endpoints = iface_desc->desc.bNumEndpoints; ++ ++ /* ++ * Scan the endpoint list and set: ++ * first input endpoint = IR endpoint ++ * first output endpoint = VFD endpoint ++ */ ++ ++ ir_ep_found = FALSE; ++ vfd_ep_found = FALSE; ++ ++ for (i = 0; i < num_endpoints && !(ir_ep_found && vfd_ep_found); ++i) { ++ ++ struct usb_endpoint_descriptor *ep; ++ int ep_dir; ++ int ep_type; ++ ep = &iface_desc->endpoint [i].desc; ++ ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK; ++ ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; ++ ++ if (!ir_ep_found && ++ ep_dir == USB_DIR_IN && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ ++ rx_endpoint = ep; ++ ir_ep_found = TRUE; ++ if (debug) ++ info("%s: found IR endpoint", __func__); ++ ++ } else if (!vfd_ep_found && ++ ep_dir == USB_DIR_OUT && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ ++ tx_endpoint = ep; ++ vfd_ep_found = TRUE; ++ if (debug) ++ 
info("%s: found VFD endpoint", __func__); ++ } ++ } ++ ++ /* Input endpoint is mandatory */ ++ if (!ir_ep_found) { ++ ++ err("%s: no valid input (IR) endpoint found.", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ /* Warning if no VFD endpoint */ ++ if (!vfd_ep_found) ++ info("%s: no valid output (VFD) endpoint found.", __func__); ++ ++ ++ /* Allocate memory */ ++ alloc_status = SUCCESS; ++ ++ context = kmalloc(sizeof(struct sasem_context), GFP_KERNEL); ++ if (!context) { ++ err("%s: kmalloc failed for context", __func__); ++ alloc_status = 1; ++ goto alloc_status_switch; ++ } ++ plugin = kmalloc(sizeof(struct lirc_plugin), GFP_KERNEL); ++ if (!plugin) { ++ err("%s: kmalloc failed for lirc_plugin", __func__); ++ alloc_status = 2; ++ goto alloc_status_switch; ++ } ++ rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!rbuf) { ++ err("%s: kmalloc failed for lirc_buffer", __func__); ++ alloc_status = 3; ++ goto alloc_status_switch; ++ } ++ if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) { ++ err("%s: lirc_buffer_init failed", __func__); ++ alloc_status = 4; ++ goto alloc_status_switch; ++ } ++ rx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!rx_urb) { ++ err("%s: usb_alloc_urb failed for IR urb", __func__); ++ alloc_status = 5; ++ goto alloc_status_switch; ++ } ++ if (vfd_ep_found) { ++ tx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!tx_urb) { ++ err("%s: usb_alloc_urb failed for VFD urb", ++ __func__); ++ alloc_status = 6; ++ goto alloc_status_switch; ++ } ++ } ++ ++ /* clear all members of sasem_context and lirc_plugin */ ++ memset(context, 0, sizeof(struct sasem_context)); ++ mutex_init(&context->lock); ++ ++ memset(plugin, 0, sizeof(struct lirc_plugin)); ++ ++ strcpy(plugin->name, MOD_NAME); ++ plugin->minor = -1; ++ plugin->code_length = 64; ++ plugin->sample_rate = 0; ++ plugin->features = LIRC_CAN_REC_LIRCCODE; ++ plugin->data = context; ++ plugin->rbuf = rbuf; ++ plugin->set_use_inc = ir_open; ++ plugin->set_use_dec = ir_close; ++ 
plugin->dev = &dev->dev; ++ plugin->owner = THIS_MODULE; ++ ++ LOCK_CONTEXT; ++ ++ lirc_minor = lirc_register_plugin(plugin); ++ if (lirc_minor < 0) { ++ err("%s: lirc_register_plugin failed", __func__); ++ alloc_status = 7; ++ UNLOCK_CONTEXT; ++ } else ++ info("%s: Registered Sasem plugin (minor:%d)", ++ __func__, lirc_minor); ++ ++alloc_status_switch: ++ ++ switch (alloc_status) { ++ ++ case 7: ++ if (vfd_ep_found) ++ usb_free_urb(tx_urb); ++ case 6: ++ usb_free_urb(rx_urb); ++ case 5: ++ lirc_buffer_free(rbuf); ++ case 4: ++ kfree(rbuf); ++ case 3: ++ kfree(plugin); ++ case 2: ++ kfree(context); ++ context = NULL; ++ case 1: ++ retval = -ENOMEM; ++ goto exit; ++ } ++ ++ /* Needed while unregistering! */ ++ plugin->minor = lirc_minor; ++ ++ context->dev = dev; ++ context->dev_present = TRUE; ++ context->rx_endpoint = rx_endpoint; ++ context->rx_urb = rx_urb; ++ if (vfd_ep_found) { ++ context->tx_endpoint = tx_endpoint; ++ context->tx_urb = tx_urb; ++ context->vfd_contrast = 1000; /* range 0 - 1000 */ ++ } ++ context->plugin = plugin; ++ ++ usb_set_intfdata(interface, context); ++ ++ if (vfd_ep_found) { ++ ++ if (debug) ++ info("Registering VFD with sysfs"); ++ if (usb_register_dev(interface, &sasem_class)) ++ /* Not a fatal error, so ignore */ ++ info("%s: could not get a minor number for VFD", ++ __func__); ++ } ++ ++ info("%s: Sasem device on usb<%d:%d> initialized", ++ __func__, dev->bus->busnum, dev->devnum); ++ ++ UNLOCK_CONTEXT; ++exit: ++ return retval; ++} ++ ++/** ++ * Callback function for USB core API: disonnect ++ */ ++static void sasem_disconnect(struct usb_interface *interface) ++{ ++ struct sasem_context *context; ++ ++ /* prevent races with ir_open()/vfd_open() */ ++ down(&disconnect_sem); ++ ++ context = usb_get_intfdata(interface); ++ LOCK_CONTEXT; ++ ++ info("%s: Sasem device disconnected", __func__); ++ ++ usb_set_intfdata(interface, NULL); ++ context->dev_present = FALSE; ++ ++ /* Stop reception */ ++ usb_kill_urb(context->rx_urb); ++ ++ /* 
Abort ongoing write */ ++ if (atomic_read(&context->tx.busy)) { ++ ++ usb_kill_urb(context->tx_urb); ++ wait_for_completion(&context->tx.finished); ++ } ++ ++ /* De-register from lirc_dev if IR port is not open */ ++ if (!context->ir_isopen) ++ deregister_from_lirc(context); ++ ++ usb_deregister_dev(interface, &sasem_class); ++ ++ UNLOCK_CONTEXT; ++ ++ if (!context->ir_isopen && !context->vfd_isopen) ++ delete_context(context); ++ ++ up(&disconnect_sem); ++} ++ ++#ifdef MODULE ++static int __init sasem_init(void) ++{ ++ int rc; ++ ++ info(MOD_DESC ", v" MOD_VERSION); ++ info(MOD_AUTHOR); ++ ++ rc = usb_register(&sasem_driver); ++ if (rc < 0) { ++ err("%s: usb register failed (%d)", __func__, rc); ++ return -ENODEV; ++ } ++ return SUCCESS; ++} ++ ++static void __exit sasem_exit(void) ++{ ++ usb_deregister(&sasem_driver); ++ info("module removed. Goodbye!"); ++} ++ ++ ++module_init(sasem_init); ++module_exit(sasem_exit); ++ ++#endif /* MODULE */ +diff --git a/drivers/input/lirc/lirc_serial.c b/drivers/input/lirc/lirc_serial.c +new file mode 100644 +index 0000000..465edd9 +--- /dev/null ++++ b/drivers/input/lirc/lirc_serial.c +@@ -0,0 +1,1312 @@ ++/**************************************************************************** ++ ** lirc_serial.c *********************************************************** ++ **************************************************************************** ++ * ++ * lirc_serial - Device driver that records pulse- and pause-lengths ++ * (space-lengths) between DDCD event on a serial port. 
++ * ++ * Copyright (C) 1996,97 Ralph Metzler ++ * Copyright (C) 1998 Trent Piepho ++ * Copyright (C) 1998 Ben Pfaff ++ * Copyright (C) 1999 Christoph Bartelmus ++ * Copyright (C) 2007 Andrei Tanas (suspend/resume support) ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++/* Steve's changes to improve transmission fidelity: ++ - for systems with the rdtsc instruction and the clock counter, a ++ send_pule that times the pulses directly using the counter. ++ This means that the LIRC_SERIAL_TRANSMITTER_LATENCY fudge is ++ not needed. Measurement shows very stable waveform, even where ++ PCI activity slows the access to the UART, which trips up other ++ versions. ++ - For other system, non-integer-microsecond pulse/space lengths, ++ done using fixed point binary. So, much more accurate carrier ++ frequency. ++ - fine tuned transmitter latency, taking advantage of fractional ++ microseconds in previous change ++ - Fixed bug in the way transmitter latency was accounted for by ++ tuning the pulse lengths down - the send_pulse routine ignored ++ this overhead as it timed the overall pulse length - so the ++ pulse frequency was right but overall pulse length was too ++ long. Fixed by accounting for latency on each pulse/space ++ iteration. 
++ ++ Steve Davies July 2001 ++*/ ++ ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#if defined(LIRC_SERIAL_NSLU2) ++#include ++/* From Intel IXP42X Developer's Manual (#252480-005): */ ++/* ftp://download.intel.com/design/network/manuals/25248005.pdf */ ++#define UART_IE_IXP42X_UUE 0x40 /* IXP42X UART Unit enable */ ++#define UART_IE_IXP42X_RTOIE 0x10 /* IXP42X Receiver Data Timeout int.enable */ ++#ifndef NSLU2_LED_GRN_GPIO ++/* added in 2.6.22 */ ++#define NSLU2_LED_GRN_GPIO NSLU2_LED_GRN ++#endif ++#endif ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++#define LIRC_DRIVER_NAME "lirc_serial" ++ ++struct lirc_serial { ++ int signal_pin; ++ int signal_pin_change; ++ int on; ++ int off; ++ long (*send_pulse)(unsigned long length); ++ void (*send_space)(long length); ++ int features; ++}; ++ ++#define LIRC_HOMEBREW 0 ++#define LIRC_IRDEO 1 ++#define LIRC_IRDEO_REMOTE 2 ++#define LIRC_ANIMAX 3 ++#define LIRC_IGOR 4 ++#define LIRC_NSLU2 5 ++ ++#ifdef LIRC_SERIAL_IRDEO ++static int type = LIRC_IRDEO; ++#elif defined(LIRC_SERIAL_IRDEO_REMOTE) ++static int type = LIRC_IRDEO_REMOTE; ++#elif defined(LIRC_SERIAL_ANIMAX) ++static int type = LIRC_ANIMAX; ++#elif defined(LIRC_SERIAL_IGOR) ++static int type = LIRC_IGOR; ++#elif defined(LIRC_SERIAL_NSLU2) ++static int type = LIRC_NSLU2; ++#else ++static int type = LIRC_HOMEBREW; ++#endif ++ ++/* Set defaults for NSLU2 */ ++#if defined(LIRC_SERIAL_NSLU2) ++#ifndef LIRC_IRQ ++#define LIRC_IRQ IRQ_IXP4XX_UART2 ++#endif ++#ifndef LIRC_PORT ++#define LIRC_PORT (IXP4XX_UART2_BASE_VIRT + REG_OFFSET) ++#endif ++#ifndef LIRC_IOMMAP ++#define LIRC_IOMMAP IXP4XX_UART2_BASE_PHYS ++#endif ++#ifndef LIRC_IOSHIFT ++#define LIRC_IOSHIFT 2 ++#endif ++#ifndef LIRC_ALLOW_MMAPPED_IO ++#define 
LIRC_ALLOW_MMAPPED_IO ++#endif ++#endif ++ ++#if defined(LIRC_ALLOW_MMAPPED_IO) ++#ifndef LIRC_IOMMAP ++#define LIRC_IOMMAP 0 ++#endif ++#ifndef LIRC_IOSHIFT ++#define LIRC_IOSHIFT 0 ++#endif ++static int iommap = LIRC_IOMMAP; ++static int ioshift = LIRC_IOSHIFT; ++#endif ++ ++#ifdef LIRC_SERIAL_SOFTCARRIER ++static int softcarrier = 1; ++#else ++static int softcarrier; ++#endif ++ ++static int share_irq; ++static int debug; ++ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \ ++ fmt, ## args); \ ++ } while (0) ++ ++/* forward declarations */ ++static long send_pulse_irdeo(unsigned long length); ++static long send_pulse_homebrew(unsigned long length); ++static void send_space_irdeo(long length); ++static void send_space_homebrew(long length); ++ ++static struct lirc_serial hardware[] = { ++ /* home-brew receiver/transmitter */ ++ { ++ UART_MSR_DCD, ++ UART_MSR_DDCD, ++ UART_MCR_RTS|UART_MCR_OUT2|UART_MCR_DTR, ++ UART_MCR_RTS|UART_MCR_OUT2, ++ send_pulse_homebrew, ++ send_space_homebrew, ++ ( ++#ifdef LIRC_SERIAL_TRANSMITTER ++ LIRC_CAN_SET_SEND_DUTY_CYCLE| ++ LIRC_CAN_SET_SEND_CARRIER| ++ LIRC_CAN_SEND_PULSE| ++#endif ++ LIRC_CAN_REC_MODE2) ++ }, ++ ++ /* IRdeo classic */ ++ { ++ UART_MSR_DSR, ++ UART_MSR_DDSR, ++ UART_MCR_OUT2, ++ UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2, ++ send_pulse_irdeo, ++ send_space_irdeo, ++ (LIRC_CAN_SET_SEND_DUTY_CYCLE| ++ LIRC_CAN_SEND_PULSE| ++ LIRC_CAN_REC_MODE2) ++ }, ++ ++ /* IRdeo remote */ ++ { ++ UART_MSR_DSR, ++ UART_MSR_DDSR, ++ UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2, ++ UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2, ++ send_pulse_irdeo, ++ send_space_irdeo, ++ (LIRC_CAN_SET_SEND_DUTY_CYCLE| ++ LIRC_CAN_SEND_PULSE| ++ LIRC_CAN_REC_MODE2) ++ }, ++ ++ /* AnimaX */ ++ { ++ UART_MSR_DCD, ++ UART_MSR_DDCD, ++ 0, ++ UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2, ++ NULL, ++ NULL, ++ LIRC_CAN_REC_MODE2 ++ }, ++ ++ /* home-brew receiver/transmitter (Igor Cesko's variation) */ ++ { ++ 
UART_MSR_DSR, ++ UART_MSR_DDSR, ++ UART_MCR_RTS|UART_MCR_OUT2|UART_MCR_DTR, ++ UART_MCR_RTS|UART_MCR_OUT2, ++ send_pulse_homebrew, ++ send_space_homebrew, ++ ( ++#ifdef LIRC_SERIAL_TRANSMITTER ++ LIRC_CAN_SET_SEND_DUTY_CYCLE| ++ LIRC_CAN_SET_SEND_CARRIER| ++ LIRC_CAN_SEND_PULSE| ++#endif ++ LIRC_CAN_REC_MODE2) ++ }, ++ ++#if defined(LIRC_SERIAL_NSLU2) ++ /* Modified Linksys Network Storage Link USB 2.0 (NSLU2): ++ We receive on CTS of the 2nd serial port (R142,LHS), we ++ transmit with a IR diode between GPIO[1] (green status LED), ++ and ground (Matthias Goebl ). ++ See also http://www.nslu2-linux.org for this device */ ++ { ++ UART_MSR_CTS, ++ UART_MSR_DCTS, ++ UART_MCR_RTS|UART_MCR_OUT2|UART_MCR_DTR, ++ UART_MCR_RTS|UART_MCR_OUT2, ++ send_pulse_homebrew, ++ send_space_homebrew, ++ ( ++#ifdef LIRC_SERIAL_TRANSMITTER ++ LIRC_CAN_SET_SEND_DUTY_CYCLE| ++ LIRC_CAN_SET_SEND_CARRIER| ++ LIRC_CAN_SEND_PULSE| ++#endif ++ LIRC_CAN_REC_MODE2) ++ }, ++#endif ++ ++}; ++ ++#define RS_ISR_PASS_LIMIT 256 ++ ++/* A long pulse code from a remote might take upto 300 bytes. The ++ daemon should read the bytes as soon as they are generated, so take ++ the number of keys you think you can push before the daemon runs ++ and multiply by 300. The driver will warn you if you overrun this ++ buffer. If you have a slow computer or non-busmastering IDE disks, ++ maybe you will need to increase this. */ ++ ++/* This MUST be a power of two! It has to be larger than 1 as well. 
*/ ++ ++#define RBUF_LEN 256 ++#define WBUF_LEN 256 ++ ++static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */ ++static int txsense; /* 0 = active high, 1 = active low */ ++ ++#ifndef LIRC_IRQ ++#define LIRC_IRQ 4 ++#endif ++#ifndef LIRC_PORT ++#define LIRC_PORT 0x3f8 ++#endif ++ ++static int io = LIRC_PORT; ++static int irq = LIRC_IRQ; ++ ++static struct timeval lasttv = {0, 0}; ++ ++static struct lirc_buffer rbuf; ++ ++static int wbuf[WBUF_LEN]; ++ ++static unsigned int freq = 38000; ++static unsigned int duty_cycle = 50; ++ ++/* Initialized in init_timing_params() */ ++static unsigned long period; ++static unsigned long pulse_width; ++static unsigned long space_width; ++ ++#if defined(__i386__) ++/* ++ From: ++ Linux I/O port programming mini-HOWTO ++ Author: Riku Saikkonen ++ v, 28 December 1997 ++ ++ [...] ++ Actually, a port I/O instruction on most ports in the 0-0x3ff range ++ takes almost exactly 1 microsecond, so if you're, for example, using ++ the parallel port directly, just do additional inb()s from that port ++ to delay. ++ [...] ++*/ ++/* transmitter latency 1.5625us 0x1.90 - this figure arrived at from ++ * comment above plus trimming to match actual measured frequency. ++ * This will be sensitive to cpu speed, though hopefully most of the 1.5us ++ * is spent in the uart access. Still - for reference test machine was a ++ * 1.13GHz Athlon system - Steve ++ */ ++ ++/* changed from 400 to 450 as this works better on slower machines; ++ faster machines will use the rdtsc code anyway */ ++ ++#define LIRC_SERIAL_TRANSMITTER_LATENCY 450 ++ ++#else ++ ++/* does anybody have information on other platforms ? 
*/ ++/* 256 = 1<<8 */ ++#define LIRC_SERIAL_TRANSMITTER_LATENCY 256 ++ ++#endif /* __i386__ */ ++ ++static inline unsigned int sinp(int offset) ++{ ++#if defined(LIRC_ALLOW_MMAPPED_IO) ++ if (iommap != 0) { ++ /* the register is memory-mapped */ ++ offset <<= ioshift; ++ return readb(io + offset); ++ } ++#endif ++ return inb(io + offset); ++} ++ ++static inline void soutp(int offset, int value) ++{ ++#if defined(LIRC_ALLOW_MMAPPED_IO) ++ if (iommap != 0) { ++ /* the register is memory-mapped */ ++ offset <<= ioshift; ++ writeb(value, io + offset); ++ } ++#endif ++ outb(value, io + offset); ++} ++ ++static inline void on(void) ++{ ++#if defined(LIRC_SERIAL_NSLU2) ++ /* On NSLU2, we put the transmit diode between the output of the green ++ status LED and ground */ ++ if (type == LIRC_NSLU2) { ++ gpio_line_set(NSLU2_LED_GRN_GPIO, IXP4XX_GPIO_LOW); ++ return; ++ } ++#endif ++ if (txsense) ++ soutp(UART_MCR, hardware[type].off); ++ else ++ soutp(UART_MCR, hardware[type].on); ++} ++ ++static inline void off(void) ++{ ++#if defined(LIRC_SERIAL_NSLU2) ++ if (type == LIRC_NSLU2) { ++ gpio_line_set(NSLU2_LED_GRN_GPIO, IXP4XX_GPIO_HIGH); ++ return; ++ } ++#endif ++ if (txsense) ++ soutp(UART_MCR, hardware[type].on); ++ else ++ soutp(UART_MCR, hardware[type].off); ++} ++ ++#ifndef MAX_UDELAY_MS ++#define MAX_UDELAY_US 5000 ++#else ++#define MAX_UDELAY_US (MAX_UDELAY_MS*1000) ++#endif ++ ++static inline void safe_udelay(unsigned long usecs) ++{ ++ while (usecs > MAX_UDELAY_US) { ++ udelay(MAX_UDELAY_US); ++ usecs -= MAX_UDELAY_US; ++ } ++ udelay(usecs); ++} ++ ++#ifdef USE_RDTSC ++/* This is an overflow/precision juggle, complicated in that we can't ++ do long long divide in the kernel */ ++ ++/* When we use the rdtsc instruction to measure clocks, we keep the ++ * pulse and space widths as clock cycles. 
As this is CPU speed ++ * dependent, the widths must be calculated in init_port and ioctl ++ * time ++ */ ++ ++/* So send_pulse can quickly convert microseconds to clocks */ ++static unsigned long conv_us_to_clocks; ++ ++static inline int init_timing_params(unsigned int new_duty_cycle, ++ unsigned int new_freq) ++{ ++ unsigned long long loops_per_sec, work; ++ ++ duty_cycle = new_duty_cycle; ++ freq = new_freq; ++ ++ loops_per_sec = current_cpu_data.loops_per_jiffy; ++ loops_per_sec *= HZ; ++ ++ /* How many clocks in a microsecond?, avoiding long long divide */ ++ work = loops_per_sec; ++ work *= 4295; /* 4295 = 2^32 / 1e6 */ ++ conv_us_to_clocks = (work>>32); ++ ++ /* Carrier period in clocks, approach good up to 32GHz clock, ++ gets carrier frequency within 8Hz */ ++ period = loops_per_sec>>3; ++ period /= (freq>>3); ++ ++ /* Derive pulse and space from the period */ ++ ++ pulse_width = period*duty_cycle/100; ++ space_width = period - pulse_width; ++ dprintk("in init_timing_params, freq=%d, duty_cycle=%d, " ++ "clk/jiffy=%ld, pulse=%ld, space=%ld, " ++ "conv_us_to_clocks=%ld\n", ++ freq, duty_cycle, current_cpu_data.loops_per_jiffy, ++ pulse_width, space_width, conv_us_to_clocks); ++ return 0; ++} ++#else /* ! USE_RDTSC */ ++static inline int init_timing_params(unsigned int new_duty_cycle, ++ unsigned int new_freq) ++{ ++/* period, pulse/space width are kept with 8 binary places - ++ * IE multiplied by 256. 
*/ ++ if (256*1000000L/new_freq*new_duty_cycle/100 <= ++ LIRC_SERIAL_TRANSMITTER_LATENCY) ++ return -EINVAL; ++ if (256*1000000L/new_freq*(100-new_duty_cycle)/100 <= ++ LIRC_SERIAL_TRANSMITTER_LATENCY) ++ return -EINVAL; ++ duty_cycle = new_duty_cycle; ++ freq = new_freq; ++ period = 256*1000000L/freq; ++ pulse_width = period*duty_cycle/100; ++ space_width = period-pulse_width; ++ dprintk("in init_timing_params, freq=%d pulse=%ld, " ++ "space=%ld\n", freq, pulse_width, space_width); ++ return 0; ++} ++#endif /* USE_RDTSC */ ++ ++ ++/* return value: space length delta */ ++ ++static long send_pulse_irdeo(unsigned long length) ++{ ++ long rawbits; ++ int i; ++ unsigned char output; ++ unsigned char chunk, shifted; ++ ++ /* how many bits have to be sent ? */ ++ rawbits = length*1152/10000; ++ if (duty_cycle > 50) ++ chunk = 3; ++ else ++ chunk = 1; ++ for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) { ++ shifted = chunk<<(i*3); ++ shifted >>= 1; ++ output &= (~shifted); ++ i++; ++ if (i == 3) { ++ soutp(UART_TX, output); ++ while (!(sinp(UART_LSR) & UART_LSR_THRE)) ++ ; ++ output = 0x7f; ++ i = 0; ++ } ++ } ++ if (i != 0) { ++ soutp(UART_TX, output); ++ while (!(sinp(UART_LSR) & UART_LSR_TEMT)) ++ ; ++ } ++ ++ if (i == 0) ++ return (-rawbits)*10000/1152; ++ else ++ return (3-i)*3*10000/1152 + (-rawbits)*10000/1152; ++} ++ ++#ifdef USE_RDTSC ++/* Version that uses Pentium rdtsc instruction to measure clocks */ ++ ++/* This version does sub-microsecond timing using rdtsc instruction, ++ * and does away with the fudged LIRC_SERIAL_TRANSMITTER_LATENCY ++ * Implicitly i586 architecture... 
- Steve ++ */ ++ ++static inline long send_pulse_homebrew_softcarrier(unsigned long length) ++{ ++ int flag; ++ unsigned long target, start, now; ++ ++ /* Get going quick as we can */ ++ rdtscl(start); on(); ++ /* Convert length from microseconds to clocks */ ++ length *= conv_us_to_clocks; ++ /* And loop till time is up - flipping at right intervals */ ++ now = start; ++ target = pulse_width; ++ flag = 1; ++ while ((now-start) < length) { ++ /* Delay till flip time */ ++ do { ++ rdtscl(now); ++ } while ((now-start) < target); ++ ++ /* flip */ ++ if (flag) { ++ rdtscl(now); off(); ++ target += space_width; ++ } else { ++ rdtscl(now); on(); ++ target += pulse_width; ++ } ++ flag = !flag; ++ } ++ rdtscl(now); ++ return ((now-start)-length) / conv_us_to_clocks; ++} ++#else /* ! USE_RDTSC */ ++/* Version using udelay() */ ++ ++/* here we use fixed point arithmetic, with 8 ++ fractional bits. that gets us within 0.1% or so of the right average ++ frequency, albeit with some jitter in pulse length - Steve */ ++ ++/* To match 8 fractional bits used for pulse/space length */ ++ ++static inline long send_pulse_homebrew_softcarrier(unsigned long length) ++{ ++ int flag; ++ unsigned long actual, target, d; ++ length <<= 8; ++ ++ actual = 0; target = 0; flag = 0; ++ while (actual < length) { ++ if (flag) { ++ off(); ++ target += space_width; ++ } else { ++ on(); ++ target += pulse_width; ++ } ++ d = (target-actual-LIRC_SERIAL_TRANSMITTER_LATENCY+128)>>8; ++ /* Note - we've checked in ioctl that the pulse/space ++ widths are big enough so that d is > 0 */ ++ udelay(d); ++ actual += (d<<8)+LIRC_SERIAL_TRANSMITTER_LATENCY; ++ flag = !flag; ++ } ++ return (actual-length)>>8; ++} ++#endif /* USE_RDTSC */ ++ ++static long send_pulse_homebrew(unsigned long length) ++{ ++ if (length <= 0) ++ return 0; ++ ++ if (softcarrier) ++ return send_pulse_homebrew_softcarrier(length); ++ else { ++ on(); ++ safe_udelay(length); ++ return 0; ++ } ++} ++ ++static void send_space_irdeo(long length) 
++{ ++ if (length <= 0) ++ return; ++ ++ safe_udelay(length); ++} ++ ++static void send_space_homebrew(long length) ++{ ++ off(); ++ if (length <= 0) ++ return; ++ safe_udelay(length); ++} ++ ++static inline void rbwrite(int l) ++{ ++ if (lirc_buffer_full(&rbuf)) { ++ /* no new signals will be accepted */ ++ dprintk("Buffer overrun\n"); ++ return; ++ } ++ _lirc_buffer_write_1(&rbuf, (void *)&l); ++} ++ ++static inline void frbwrite(int l) ++{ ++ /* simple noise filter */ ++ static int pulse, space; ++ static unsigned int ptr; ++ ++ if (ptr > 0 && (l & PULSE_BIT)) { ++ pulse += l & PULSE_MASK; ++ if (pulse > 250) { ++ rbwrite(space); ++ rbwrite(pulse | PULSE_BIT); ++ ptr = 0; ++ pulse = 0; ++ } ++ return; ++ } ++ if (!(l & PULSE_BIT)) { ++ if (ptr == 0) { ++ if (l > 20000) { ++ space = l; ++ ptr++; ++ return; ++ } ++ } else { ++ if (l > 20000) { ++ space += pulse; ++ if (space > PULSE_MASK) ++ space = PULSE_MASK; ++ space += l; ++ if (space > PULSE_MASK) ++ space = PULSE_MASK; ++ pulse = 0; ++ return; ++ } ++ rbwrite(space); ++ rbwrite(pulse | PULSE_BIT); ++ ptr = 0; ++ pulse = 0; ++ } ++ } ++ rbwrite(l); ++} ++ ++static irqreturn_t irq_handler(int i, void *blah) ++{ ++ struct timeval tv; ++ int status, counter, dcd; ++ long deltv; ++ int data; ++ static int last_dcd = -1; ++ ++ if ((sinp(UART_IIR) & UART_IIR_NO_INT)) { ++ /* not our interrupt */ ++ return IRQ_RETVAL(IRQ_NONE); ++ } ++ ++ counter = 0; ++ do { ++ counter++; ++ status = sinp(UART_MSR); ++ if (counter > RS_ISR_PASS_LIMIT) { ++ printk(KERN_WARNING LIRC_DRIVER_NAME ": AIEEEE: " ++ "We're caught!\n"); ++ break; ++ } ++ if ((status&hardware[type].signal_pin_change) && sense != -1) { ++ /* get current time */ ++ do_gettimeofday(&tv); ++ ++ /* New mode, written by Trent Piepho ++ . */ ++ ++ /* The old format was not very portable. ++ We now use an int to pass pulses ++ and spaces to user space. ++ ++ If PULSE_BIT is set a pulse has been ++ received, otherwise a space has been ++ received. 
The driver needs to know if your ++ receiver is active high or active low, or ++ the space/pulse sense could be ++ inverted. The bits denoted by PULSE_MASK are ++ the length in microseconds. Lengths greater ++ than or equal to 16 seconds are clamped to ++ PULSE_MASK. All other bits are unused. ++ This is a much simpler interface for user ++ programs, as well as eliminating "out of ++ phase" errors with space/pulse ++ autodetection. */ ++ ++ /* calculate time since last interrupt in ++ microseconds */ ++ dcd = (status & hardware[type].signal_pin) ? 1 : 0; ++ ++ if (dcd == last_dcd) { ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": ignoring spike: %d %d %lx %lx %lx %lx\n", ++ dcd, sense, ++ tv.tv_sec, lasttv.tv_sec, ++ tv.tv_usec, lasttv.tv_usec); ++ continue; ++ } ++ ++ deltv = tv.tv_sec-lasttv.tv_sec; ++ if (tv.tv_sec < lasttv.tv_sec || ++ (tv.tv_sec == lasttv.tv_sec && ++ tv.tv_usec < lasttv.tv_usec)) { ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": AIEEEE: your clock just jumped " ++ "backwards\n"); ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": %d %d %lx %lx %lx %lx\n", ++ dcd, sense, ++ tv.tv_sec, lasttv.tv_sec, ++ tv.tv_usec, lasttv.tv_usec); ++ data = PULSE_MASK; ++ } else if (deltv > 15) { ++ data = PULSE_MASK; /* really long time */ ++ if (!(dcd^sense)) { ++ /* sanity check */ ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": AIEEEE: " ++ "%d %d %lx %lx %lx %lx\n", ++ dcd, sense, ++ tv.tv_sec, lasttv.tv_sec, ++ tv.tv_usec, lasttv.tv_usec); ++ /* detecting pulse while this ++ MUST be a space! */ ++ sense = sense ? 0 : 1; ++ } ++ } else ++ data = (int) (deltv*1000000 + ++ tv.tv_usec - ++ lasttv.tv_usec); ++ frbwrite(dcd^sense ? data : (data|PULSE_BIT)); ++ lasttv = tv; ++ last_dcd = dcd; ++ wake_up_interruptible(&rbuf.wait_poll); ++ } ++ } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */ ++ return IRQ_RETVAL(IRQ_HANDLED); ++} ++ ++static void hardware_init_port(void) ++{ ++ unsigned long flags; ++ local_irq_save(flags); ++ ++ /* Set DLAB 0. 
*/ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* First of all, disable all interrupts */ ++ soutp(UART_IER, sinp(UART_IER) & ++ (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); ++ ++ /* Clear registers. */ ++ sinp(UART_LSR); ++ sinp(UART_RX); ++ sinp(UART_IIR); ++ sinp(UART_MSR); ++ ++#if defined(LIRC_SERIAL_NSLU2) ++ if (type == LIRC_NSLU2) { ++ /* Setup NSLU2 UART */ ++ ++ /* Enable UART */ ++ soutp(UART_IER, sinp(UART_IER) | UART_IE_IXP42X_UUE); ++ /* Disable Receiver data Time out interrupt */ ++ soutp(UART_IER, sinp(UART_IER) & ~UART_IE_IXP42X_RTOIE); ++ /* set out2 = interupt unmask; off() doesn't set MCR ++ on NSLU2 */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2); ++ } ++#endif ++ ++ /* Set line for power source */ ++ off(); ++ ++ /* Clear registers again to be sure. */ ++ sinp(UART_LSR); ++ sinp(UART_RX); ++ sinp(UART_IIR); ++ sinp(UART_MSR); ++ ++ switch (type) { ++ case LIRC_IRDEO: ++ case LIRC_IRDEO_REMOTE: ++ /* setup port to 7N1 @ 115200 Baud */ ++ /* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */ ++ ++ /* Set DLAB 1. */ ++ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB); ++ /* Set divisor to 1 => 115200 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 1); ++ /* Set DLAB 0 + 7N1 */ ++ soutp(UART_LCR, UART_LCR_WLEN7); ++ /* THR interrupt already disabled at this point */ ++ break; ++ default: ++ break; ++ } ++ ++ local_irq_restore(flags); ++} ++ ++static int init_port(void) ++{ ++ int i, nlow, nhigh; ++ ++ /* Reserve io region. */ ++#if defined(LIRC_ALLOW_MMAPPED_IO) ++ /* Future MMAP-Developers: Attention! ++ For memory mapped I/O you *might* need to use ioremap() first, ++ for the NSLU2 it's done in boot code. */ ++ if (((iommap != 0) ++ && (request_mem_region(iommap, 8<= nhigh ? 1 : 0); ++ printk(KERN_INFO LIRC_DRIVER_NAME ": auto-detected active " ++ "%s receiver\n", sense ? "low" : "high"); ++ } else ++ printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active " ++ "%s receiver\n", sense ? 
"low" : "high"); ++ ++ return 0; ++} ++ ++static int set_use_inc(void *data) ++{ ++ int result; ++ unsigned long flags; ++ ++ /* Init read buffer. */ ++ if (lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN) < 0) ++ return -ENOMEM; ++ ++ /* initialize timestamp */ ++ do_gettimeofday(&lasttv); ++ ++ result = request_irq(irq, irq_handler, ++ IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0), ++ LIRC_DRIVER_NAME, (void *)&hardware); ++ ++ switch (result) { ++ case -EBUSY: ++ printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq); ++ lirc_buffer_free(&rbuf); ++ return -EBUSY; ++ case -EINVAL: ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": Bad irq number or handler\n"); ++ lirc_buffer_free(&rbuf); ++ return -EINVAL; ++ default: ++ dprintk("Interrupt %d, port %04x obtained\n", irq, io); ++ break; ++ }; ++ ++ local_irq_save(flags); ++ ++ /* Set DLAB 0. */ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI); ++ ++ local_irq_restore(flags); ++ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ unsigned long flags; ++ ++ local_irq_save(flags); ++ ++ /* Set DLAB 0. 
*/ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* First of all, disable all interrupts */ ++ soutp(UART_IER, sinp(UART_IER) & ++ (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); ++ local_irq_restore(flags); ++ ++ free_irq(irq, (void *)&hardware); ++ ++ dprintk("freed IRQ %d\n", irq); ++ lirc_buffer_free(&rbuf); ++} ++ ++static ssize_t lirc_write(struct file *file, const char *buf, ++ size_t n, loff_t *ppos) ++{ ++ int i, count; ++ unsigned long flags; ++ long delta = 0; ++ ++ if (!(hardware[type].features&LIRC_CAN_SEND_PULSE)) ++ return -EBADF; ++ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ count = n / sizeof(int); ++ if (count > WBUF_LEN || count % 2 == 0) ++ return -EINVAL; ++ if (copy_from_user(wbuf, buf, n)) ++ return -EFAULT; ++ local_irq_save(flags); ++ if (type == LIRC_IRDEO) { ++ /* DTR, RTS down */ ++ on(); ++ } ++ for (i = 0; i < count; i++) { ++ if (i%2) ++ hardware[type].send_space(wbuf[i]-delta); ++ else ++ delta = hardware[type].send_pulse(wbuf[i]); ++ } ++ off(); ++ local_irq_restore(flags); ++ return n; ++} ++ ++static int lirc_ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg) ++{ ++ int result; ++ unsigned long value; ++ unsigned int ivalue; ++ ++ switch (cmd) { ++ case LIRC_GET_SEND_MODE: ++ if (!(hardware[type].features&LIRC_CAN_SEND_MASK)) ++ return -ENOIOCTLCMD; ++ ++ result = put_user(LIRC_SEND2MODE ++ (hardware[type].features&LIRC_CAN_SEND_MASK), ++ (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ ++ case LIRC_SET_SEND_MODE: ++ if (!(hardware[type].features&LIRC_CAN_SEND_MASK)) ++ return -ENOIOCTLCMD; ++ ++ result = get_user(value, (unsigned long *) arg); ++ if (result) ++ return result; ++ /* only LIRC_MODE_PULSE supported */ ++ if (value != LIRC_MODE_PULSE) ++ return -ENOSYS; ++ break; ++ ++ case LIRC_GET_LENGTH: ++ return -ENOSYS; ++ break; ++ ++ case LIRC_SET_SEND_DUTY_CYCLE: ++ dprintk("SET_SEND_DUTY_CYCLE\n"); ++ if 
(!(hardware[type].features&LIRC_CAN_SET_SEND_DUTY_CYCLE)) ++ return -ENOIOCTLCMD; ++ ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ if (ivalue <= 0 || ivalue > 100) ++ return -EINVAL; ++ return init_timing_params(ivalue, freq); ++ break; ++ ++ case LIRC_SET_SEND_CARRIER: ++ dprintk("SET_SEND_CARRIER\n"); ++ if (!(hardware[type].features&LIRC_CAN_SET_SEND_CARRIER)) ++ return -ENOIOCTLCMD; ++ ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ if (ivalue > 500000 || ivalue < 20000) ++ return -EINVAL; ++ return init_timing_params(duty_cycle, ivalue); ++ break; ++ ++ default: ++ return -ENOIOCTLCMD; ++ } ++ return 0; ++} ++ ++static struct file_operations lirc_fops = { ++ .write = lirc_write, ++}; ++ ++static struct lirc_plugin plugin = { ++ .name = LIRC_DRIVER_NAME, ++ .minor = -1, ++ .code_length = 1, ++ .sample_rate = 0, ++ .data = NULL, ++ .add_to_buf = NULL, ++ .get_queue = NULL, ++ .rbuf = &rbuf, ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .ioctl = lirc_ioctl, ++ .fops = &lirc_fops, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++#ifdef MODULE ++ ++static struct platform_device *lirc_serial_dev; ++ ++static int __devinit lirc_serial_probe(struct platform_device *dev) ++{ ++ return 0; ++} ++ ++static int __devexit lirc_serial_remove(struct platform_device *dev) ++{ ++ return 0; ++} ++ ++static int lirc_serial_suspend(struct platform_device *dev, ++ pm_message_t state) ++{ ++ /* Set DLAB 0. */ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* Disable all interrupts */ ++ soutp(UART_IER, sinp(UART_IER) & ++ (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); ++ ++ /* Clear registers. 
*/ ++ sinp(UART_LSR); ++ sinp(UART_RX); ++ sinp(UART_IIR); ++ sinp(UART_MSR); ++ ++ return 0; ++} ++ ++static int lirc_serial_resume(struct platform_device *dev) ++{ ++ unsigned long flags; ++ ++ hardware_init_port(); ++ ++ local_irq_save(flags); ++ /* Enable Interrupt */ ++ do_gettimeofday(&lasttv); ++ soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI); ++ off(); ++ ++ lirc_buffer_clear(&rbuf); ++ ++ local_irq_restore(flags); ++ ++ return 0; ++} ++ ++static struct platform_driver lirc_serial_driver = { ++ .probe = lirc_serial_probe, ++ .remove = __devexit_p(lirc_serial_remove), ++ .suspend = lirc_serial_suspend, ++ .resume = lirc_serial_resume, ++ .driver = { ++ .name = "lirc_serial", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init lirc_serial_init(void) ++{ ++ int result; ++ ++ result = platform_driver_register(&lirc_serial_driver); ++ if (result) { ++ printk("lirc register returned %d\n", result); ++ return result; ++ } ++ ++ lirc_serial_dev = platform_device_alloc("lirc_serial", 0); ++ if (!lirc_serial_dev) { ++ result = -ENOMEM; ++ goto exit_driver_unregister; ++ } ++ ++ result = platform_device_add(lirc_serial_dev); ++ if (result) ++ goto exit_device_put; ++ ++ return 0; ++ ++exit_device_put: ++ platform_device_put(lirc_serial_dev); ++exit_driver_unregister: ++ platform_driver_unregister(&lirc_serial_driver); ++ return result; ++} ++ ++static void __exit lirc_serial_exit(void) ++{ ++ platform_device_unregister(lirc_serial_dev); ++ platform_driver_unregister(&lirc_serial_driver); ++} ++ ++int __init init_module(void) ++{ ++ int result; ++ ++ result = lirc_serial_init(); ++ if (result) ++ return result; ++ switch (type) { ++ case LIRC_HOMEBREW: ++ case LIRC_IRDEO: ++ case LIRC_IRDEO_REMOTE: ++ case LIRC_ANIMAX: ++ case LIRC_IGOR: ++#if defined(LIRC_SERIAL_NSLU2) ++ case LIRC_NSLU2: ++#endif ++ break; ++ default: ++ result = -EINVAL; ++ goto exit_serial_exit; ++ } ++ if (!softcarrier) { ++ switch (type) { ++ case LIRC_HOMEBREW: ++ case LIRC_IGOR: ++ case 
LIRC_NSLU2: ++ hardware[type].features &= ++ ~(LIRC_CAN_SET_SEND_DUTY_CYCLE| ++ LIRC_CAN_SET_SEND_CARRIER); ++ break; ++ } ++ } ++ result = init_port(); ++ if (result < 0) ++ goto exit_serial_exit; ++ plugin.features = hardware[type].features; ++ plugin.minor = lirc_register_plugin(&plugin); ++ if (plugin.minor < 0) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": register_chrdev failed!\n"); ++ result = -EIO; ++ goto exit_release; ++ } ++ return 0; ++exit_release: ++ release_region(io, 8); ++exit_serial_exit: ++ lirc_serial_exit(); ++ return result; ++} ++ ++void __exit cleanup_module(void) ++{ ++ lirc_serial_exit(); ++#if defined(LIRC_ALLOW_MMAPPED_IO) ++ if (iommap != 0) ++ release_mem_region(iommap, 8< ++ * ++ * lirc_sir - Device driver for use with SIR (serial infra red) ++ * mode of IrDA on many notebooks. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ++ * 2000/09/16 Frank Przybylski : ++ * added timeout and relaxed pulse detection, removed gap bug ++ * ++ * 2000/12/15 Christoph Bartelmus : ++ * added support for Tekram Irmate 210 (sending does not work yet, ++ * kind of disappointing that nobody was able to implement that ++ * before), ++ * major clean-up ++ * ++ * 2001/02/27 Christoph Bartelmus : ++ * added support for StrongARM SA1100 embedded microprocessor ++ * parts cut'n'pasted from sa1100_ir.c (C) 2000 Russell King ++ */ ++ ++ ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef LIRC_ON_SA1100 ++#include ++#ifdef CONFIG_SA1100_COLLIE ++#include ++#include ++#endif ++#endif ++ ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++/* SECTION: Definitions */ ++ ++/**************************** Tekram dongle ***************************/ ++#ifdef LIRC_SIR_TEKRAM ++/* stolen from kernel source */ ++/* definitions for Tekram dongle */ ++#define TEKRAM_115200 0x00 ++#define TEKRAM_57600 0x01 ++#define TEKRAM_38400 0x02 ++#define TEKRAM_19200 0x03 ++#define TEKRAM_9600 0x04 ++#define TEKRAM_2400 0x08 ++ ++#define TEKRAM_PW 0x10 /* Pulse select bit */ ++ ++/* 10bit * 1s/115200bit in miliseconds = 87ms*/ ++#define TIME_CONST (10000000ul/115200ul) ++ ++#endif ++ ++#ifdef LIRC_SIR_ACTISYS_ACT200L ++static void init_act200(void); ++#elif defined(LIRC_SIR_ACTISYS_ACT220L) ++static void init_act220(void); ++#endif ++ ++/******************************* SA1100 ********************************/ ++#ifdef LIRC_ON_SA1100 ++struct sa1100_ser2_registers { ++ /* 
HSSP control register */ ++ unsigned char hscr0; ++ /* UART registers */ ++ unsigned char utcr0; ++ unsigned char utcr1; ++ unsigned char utcr2; ++ unsigned char utcr3; ++ unsigned char utcr4; ++ unsigned char utdr; ++ unsigned char utsr0; ++ unsigned char utsr1; ++} sr; ++ ++static int irq = IRQ_Ser2ICP; ++ ++#define LIRC_ON_SA1100_TRANSMITTER_LATENCY 0 ++ ++/* pulse/space ratio of 50/50 */ ++static unsigned long pulse_width = (13-LIRC_ON_SA1100_TRANSMITTER_LATENCY); ++/* 1000000/freq-pulse_width */ ++static unsigned long space_width = (13-LIRC_ON_SA1100_TRANSMITTER_LATENCY); ++static unsigned int freq = 38000; /* modulation frequency */ ++static unsigned int duty_cycle = 50; /* duty cycle of 50% */ ++ ++#endif ++ ++#define RBUF_LEN 1024 ++#define WBUF_LEN 1024 ++ ++#define LIRC_DRIVER_NAME "lirc_sir" ++ ++#define PULSE '[' ++ ++#ifndef LIRC_SIR_TEKRAM ++/* 9bit * 1s/115200bit in milli seconds = 78.125ms*/ ++#define TIME_CONST (9000000ul/115200ul) ++#endif ++ ++ ++/* timeout for sequences in jiffies (=5/100s) */ ++/* must be longer than TIME_CONST */ ++#define SIR_TIMEOUT (HZ*5/100) ++ ++#ifndef LIRC_ON_SA1100 ++#ifndef LIRC_IRQ ++#define LIRC_IRQ 4 ++#endif ++#ifndef LIRC_PORT ++#define LIRC_PORT 0x3e8 ++#endif ++ ++static int io = LIRC_PORT; ++static int irq = LIRC_IRQ; ++static int threshold = 3; ++#endif ++ ++static DEFINE_SPINLOCK(timer_lock); ++static struct timer_list timerlist; ++/* time of last signal change detected */ ++static struct timeval last_tv = {0, 0}; ++/* time of last UART data ready interrupt */ ++static struct timeval last_intr_tv = {0, 0}; ++static int last_value; ++ ++static DECLARE_WAIT_QUEUE_HEAD(lirc_read_queue); ++ ++static DEFINE_SPINLOCK(hardware_lock); ++static DEFINE_SPINLOCK(dev_lock); ++ ++static int rx_buf[RBUF_LEN]; ++static unsigned int rx_tail, rx_head; ++static int tx_buf[WBUF_LEN]; ++ ++static int debug; ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \ ++ fmt, ## args); \ ++ } while (0) ++ ++/* SECTION: Prototypes */ ++ ++/* Communication with user-space */ ++static int lirc_open(struct inode *inode, struct file *file); ++static int lirc_close(struct inode *inode, struct file *file); ++static unsigned int lirc_poll(struct file *file, poll_table *wait); ++static ssize_t lirc_read(struct file *file, char *buf, size_t count, ++ loff_t *ppos); ++static ssize_t lirc_write(struct file *file, const char *buf, size_t n, ++ loff_t *pos); ++static int lirc_ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg); ++static void add_read_queue(int flag, unsigned long val); ++#ifdef MODULE ++static int init_chrdev(void); ++static void drop_chrdev(void); ++#endif ++ /* Hardware */ ++static irqreturn_t sir_interrupt(int irq, void *dev_id); ++static void send_space(unsigned long len); ++static void send_pulse(unsigned long len); ++static int init_hardware(void); ++static void drop_hardware(void); ++ /* Initialisation */ ++static int init_port(void); ++static void drop_port(void); ++ ++#ifdef LIRC_ON_SA1100 ++static inline void on(void) ++{ ++ PPSR |= PPC_TXD2; ++} ++ ++static inline void off(void) ++{ ++ PPSR &= ~PPC_TXD2; ++} ++#else ++static inline unsigned int sinp(int offset) ++{ ++ return inb(io + offset); ++} ++ ++static inline void soutp(int offset, int value) ++{ ++ outb(value, io + offset); ++} ++#endif ++ ++#ifndef MAX_UDELAY_MS ++#define MAX_UDELAY_US 5000 ++#else ++#define MAX_UDELAY_US (MAX_UDELAY_MS*1000) ++#endif ++ ++static inline void safe_udelay(unsigned long usecs) ++{ ++ while (usecs > MAX_UDELAY_US) { ++ udelay(MAX_UDELAY_US); ++ usecs -= MAX_UDELAY_US; ++ } ++ udelay(usecs); ++} ++ ++/* SECTION: Communication with user-space */ ++ ++static int lirc_open(struct inode *inode, struct file *file) ++{ ++ spin_lock(&dev_lock); ++ if (module_refcount(THIS_MODULE)) { ++ spin_unlock(&dev_lock); ++ return -EBUSY; ++ 
} ++ spin_unlock(&dev_lock); ++ return 0; ++} ++ ++static int lirc_close(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++static unsigned int lirc_poll(struct file *file, poll_table *wait) ++{ ++ poll_wait(file, &lirc_read_queue, wait); ++ if (rx_head != rx_tail) ++ return POLLIN | POLLRDNORM; ++ return 0; ++} ++ ++static ssize_t lirc_read(struct file *file, char *buf, size_t count, ++ loff_t *ppos) ++{ ++ int n = 0; ++ int retval = 0; ++ DECLARE_WAITQUEUE(wait, current); ++ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ ++ add_wait_queue(&lirc_read_queue, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (n < count) { ++ if (rx_head != rx_tail) { ++ if (copy_to_user((void *) buf + n, ++ (void *) (rx_buf + rx_head), ++ sizeof(int))) { ++ retval = -EFAULT; ++ break; ++ } ++ rx_head = (rx_head + 1) & (RBUF_LEN - 1); ++ n += sizeof(int); ++ } else { ++ if (file->f_flags & O_NONBLOCK) { ++ retval = -EAGAIN; ++ break; ++ } ++ if (signal_pending(current)) { ++ retval = -ERESTARTSYS; ++ break; ++ } ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ } ++ remove_wait_queue(&lirc_read_queue, &wait); ++ set_current_state(TASK_RUNNING); ++ return n ? 
n : retval; ++} ++static ssize_t lirc_write(struct file *file, const char *buf, size_t n, ++ loff_t *pos) ++{ ++ unsigned long flags; ++ int i; ++ ++ if (n % sizeof(int) || (n / sizeof(int)) > WBUF_LEN) ++ return -EINVAL; ++ if (copy_from_user(tx_buf, buf, n)) ++ return -EFAULT; ++ i = 0; ++ n /= sizeof(int); ++#ifdef LIRC_ON_SA1100 ++ /* disable receiver */ ++ Ser2UTCR3 = 0; ++#endif ++ local_irq_save(flags); ++ while (1) { ++ if (i >= n) ++ break; ++ if (tx_buf[i]) ++ send_pulse(tx_buf[i]); ++ i++; ++ if (i >= n) ++ break; ++ if (tx_buf[i]) ++ send_space(tx_buf[i]); ++ i++; ++ } ++ local_irq_restore(flags); ++#ifdef LIRC_ON_SA1100 ++ off(); ++ udelay(1000); /* wait 1ms for IR diode to recover */ ++ Ser2UTCR3 = 0; ++ /* clear status register to prevent unwanted interrupts */ ++ Ser2UTSR0 &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB); ++ /* enable receiver */ ++ Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE; ++#endif ++ return n; ++} ++ ++static int lirc_ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg) ++{ ++ int retval = 0; ++ unsigned long value = 0; ++#ifdef LIRC_ON_SA1100 ++ unsigned int ivalue; ++ ++ if (cmd == LIRC_GET_FEATURES) ++ value = LIRC_CAN_SEND_PULSE | ++ LIRC_CAN_SET_SEND_DUTY_CYCLE | ++ LIRC_CAN_SET_SEND_CARRIER | ++ LIRC_CAN_REC_MODE2; ++ else if (cmd == LIRC_GET_SEND_MODE) ++ value = LIRC_MODE_PULSE; ++ else if (cmd == LIRC_GET_REC_MODE) ++ value = LIRC_MODE_MODE2; ++#else ++ if (cmd == LIRC_GET_FEATURES) ++ value = LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2; ++ else if (cmd == LIRC_GET_SEND_MODE) ++ value = LIRC_MODE_PULSE; ++ else if (cmd == LIRC_GET_REC_MODE) ++ value = LIRC_MODE_MODE2; ++#endif ++ ++ switch (cmd) { ++ case LIRC_GET_FEATURES: ++ case LIRC_GET_SEND_MODE: ++ case LIRC_GET_REC_MODE: ++ retval = put_user(value, (unsigned long *) arg); ++ break; ++ ++ case LIRC_SET_SEND_MODE: ++ case LIRC_SET_REC_MODE: ++ retval = get_user(value, (unsigned long *) arg); ++ break; ++#ifdef LIRC_ON_SA1100 ++ case 
LIRC_SET_SEND_DUTY_CYCLE: ++ retval = get_user(ivalue, (unsigned int *) arg); ++ if (retval) ++ return retval; ++ if (ivalue <= 0 || ivalue > 100) ++ return -EINVAL; ++ /* (ivalue/100)*(1000000/freq) */ ++ duty_cycle = ivalue; ++ pulse_width = (unsigned long) duty_cycle*10000/freq; ++ space_width = (unsigned long) 1000000L/freq-pulse_width; ++ if (pulse_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY) ++ pulse_width -= LIRC_ON_SA1100_TRANSMITTER_LATENCY; ++ if (space_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY) ++ space_width -= LIRC_ON_SA1100_TRANSMITTER_LATENCY; ++ break; ++ case LIRC_SET_SEND_CARRIER: ++ retval = get_user(ivalue, (unsigned int *) arg); ++ if (retval) ++ return retval; ++ if (ivalue > 500000 || ivalue < 20000) ++ return -EINVAL; ++ freq = ivalue; ++ pulse_width = (unsigned long) duty_cycle*10000/freq; ++ space_width = (unsigned long) 1000000L/freq-pulse_width; ++ if (pulse_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY) ++ pulse_width -= LIRC_ON_SA1100_TRANSMITTER_LATENCY; ++ if (space_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY) ++ space_width -= LIRC_ON_SA1100_TRANSMITTER_LATENCY; ++ break; ++#endif ++ default: ++ retval = -ENOIOCTLCMD; ++ ++ } ++ ++ if (retval) ++ return retval; ++ if (cmd == LIRC_SET_REC_MODE) { ++ if (value != LIRC_MODE_MODE2) ++ retval = -ENOSYS; ++ } else if (cmd == LIRC_SET_SEND_MODE) { ++ if (value != LIRC_MODE_PULSE) ++ retval = -ENOSYS; ++ } ++ ++ return retval; ++} ++ ++static void add_read_queue(int flag, unsigned long val) ++{ ++ unsigned int new_rx_tail; ++ int newval; ++ ++ dprintk("add flag %d with val %lu\n", flag, val); ++ ++ newval = val & PULSE_MASK; ++ ++ /* statistically pulses are ~TIME_CONST/2 too long: we could ++ maybe make this more exactly but this is good enough */ ++ if (flag) { ++ /* pulse */ ++ if (newval > TIME_CONST/2) ++ newval -= TIME_CONST/2; ++ else /* should not ever happen */ ++ newval = 1; ++ newval |= PULSE_BIT; ++ } else { ++ newval += TIME_CONST/2; ++ } ++ new_rx_tail = (rx_tail + 1) &
(RBUF_LEN - 1); ++ if (new_rx_tail == rx_head) { ++ dprintk("Buffer overrun.\n"); ++ return; ++ } ++ rx_buf[rx_tail] = newval; ++ rx_tail = new_rx_tail; ++ wake_up_interruptible(&lirc_read_queue); ++} ++ ++static struct file_operations lirc_fops = { ++ .read = lirc_read, ++ .write = lirc_write, ++ .poll = lirc_poll, ++ .ioctl = lirc_ioctl, ++ .open = lirc_open, ++ .release = lirc_close, ++}; ++ ++static int set_use_inc(void *data) ++{ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++} ++ ++static struct lirc_plugin plugin = { ++ .name = LIRC_DRIVER_NAME, ++ .minor = -1, ++ .code_length = 1, ++ .sample_rate = 0, ++ .data = NULL, ++ .add_to_buf = NULL, ++ .get_queue = NULL, ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .fops = &lirc_fops, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++ ++#ifdef MODULE ++static int init_chrdev(void) ++{ ++ plugin.minor = lirc_register_plugin(&plugin); ++ if (plugin.minor < 0) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++static void drop_chrdev(void) ++{ ++ lirc_unregister_plugin(plugin.minor); ++} ++#endif ++ ++/* SECTION: Hardware */ ++static long delta(struct timeval *tv1, struct timeval *tv2) ++{ ++ unsigned long deltv; ++ ++ deltv = tv2->tv_sec - tv1->tv_sec; ++ if (deltv > 15) ++ deltv = 0xFFFFFF; ++ else ++ deltv = deltv*1000000 + ++ tv2->tv_usec - ++ tv1->tv_usec; ++ return deltv; ++} ++ ++static void sir_timeout(unsigned long data) ++{ ++ /* if last received signal was a pulse, but receiving stopped ++ within the 9 bit frame, we need to finish this pulse and ++ simulate a signal change to from pulse to space. Otherwise ++ upper layers will receive two sequences next time. 
*/ ++ ++ unsigned long flags; ++ unsigned long pulse_end; ++ ++ /* avoid interference with interrupt */ ++ spin_lock_irqsave(&timer_lock, flags); ++ if (last_value) { ++#ifndef LIRC_ON_SA1100 ++ /* clear unread bits in UART and restart */ ++ outb(UART_FCR_CLEAR_RCVR, io + UART_FCR); ++#endif ++ /* determine 'virtual' pulse end: */ ++ pulse_end = delta(&last_tv, &last_intr_tv); ++ dprintk("timeout add %d for %lu usec\n", last_value, pulse_end); ++ add_read_queue(last_value, pulse_end); ++ last_value = 0; ++ last_tv = last_intr_tv; ++ } ++ spin_unlock_irqrestore(&timer_lock, flags); ++} ++ ++static irqreturn_t sir_interrupt(int irq, void *dev_id) ++{ ++ unsigned char data; ++ struct timeval curr_tv; ++ static unsigned long deltv; ++#ifdef LIRC_ON_SA1100 ++ int status; ++ static int n; ++ ++ status = Ser2UTSR0; ++ /* ++ * Deal with any receive errors first. The bytes in error may be ++ * the only bytes in the receive FIFO, so we do this first. ++ */ ++ while (status & UTSR0_EIF) { ++ int bstat; ++ ++ if (debug) { ++ dprintk("EIF\n"); ++ bstat = Ser2UTSR1; ++ ++ if (bstat & UTSR1_FRE) ++ dprintk("frame error\n"); ++ if (bstat & UTSR1_ROR) ++ dprintk("receive fifo overrun\n"); ++ if (bstat & UTSR1_PRE) ++ dprintk("parity error\n"); ++ } ++ ++ bstat = Ser2UTDR; ++ n++; ++ status = Ser2UTSR0; ++ } ++ ++ if (status & (UTSR0_RFS | UTSR0_RID)) { ++ do_gettimeofday(&curr_tv); ++ deltv = delta(&last_tv, &curr_tv); ++ do { ++ data = Ser2UTDR; ++ dprintk("%d data: %u\n", n, (unsigned int) data); ++ n++; ++ } while (status & UTSR0_RID && /* do not empty fifo in ++ order to get UTSR0_RID in ++ any case */ ++ Ser2UTSR1 & UTSR1_RNE); /* data ready */ ++ ++ if (status&UTSR0_RID) { ++ add_read_queue(0 , deltv - n * TIME_CONST); /*space*/ ++ add_read_queue(1, n * TIME_CONST); /*pulse*/ ++ n = 0; ++ last_tv = curr_tv; ++ } ++ } ++ ++ if (status & UTSR0_TFS) ++ printk(KERN_ERR "transmit fifo not full, shouldn't happen\n"); ++ ++ /* ++ * We must clear certain bits. 
++ */ ++ status &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB); ++ if (status) ++ Ser2UTSR0 = status; ++#else ++ unsigned long deltintrtv; ++ unsigned long flags; ++ int iir, lsr; ++ ++ while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { ++ switch (iir&UART_IIR_ID) { /* FIXME toto treba preriedit */ ++ case UART_IIR_MSI: ++ (void) inb(io + UART_MSR); ++ break; ++ case UART_IIR_RLSI: ++ (void) inb(io + UART_LSR); ++ break; ++ case UART_IIR_THRI: ++#if 0 ++ if (lsr & UART_LSR_THRE) /* FIFO is empty */ ++ outb(data, io + UART_TX) ++#endif ++ break; ++ case UART_IIR_RDI: ++ /* avoid interference with timer */ ++ spin_lock_irqsave(&timer_lock, flags); ++ do { ++ del_timer(&timerlist); ++ data = inb(io + UART_RX); ++ do_gettimeofday(&curr_tv); ++ deltv = delta(&last_tv, &curr_tv); ++ deltintrtv = delta(&last_intr_tv, &curr_tv); ++ dprintk("t %lu, d %d\n", deltintrtv, (int)data); ++ /* if nothing came in last X cycles, ++ it was gap */ ++ if (deltintrtv > TIME_CONST * threshold) { ++ if (last_value) { ++ dprintk("GAP\n"); ++ /* simulate signal change */ ++ add_read_queue(last_value, ++ deltv - ++ deltintrtv); ++ last_value = 0; ++ last_tv.tv_sec = ++ last_intr_tv.tv_sec; ++ last_tv.tv_usec = ++ last_intr_tv.tv_usec; ++ deltv = deltintrtv; ++ } ++ } ++ data = 1; ++ if (data ^ last_value) { ++ /* deltintrtv > 2*TIME_CONST, ++ remember ? 
*/ ++ /* the other case is timeout */ ++ add_read_queue(last_value, ++ deltv-TIME_CONST); ++ last_value = data; ++ last_tv = curr_tv; ++ if (last_tv.tv_usec >= TIME_CONST) { ++ last_tv.tv_usec -= TIME_CONST; ++ } else { ++ last_tv.tv_sec--; ++ last_tv.tv_usec += 1000000 - ++ TIME_CONST; ++ } ++ } ++ last_intr_tv = curr_tv; ++ if (data) { ++ /* start timer for end of ++ * sequence detection */ ++ timerlist.expires = jiffies + ++ SIR_TIMEOUT; ++ add_timer(&timerlist); ++ } ++ ++ lsr = inb(io + UART_LSR); ++ } while (lsr & UART_LSR_DR); /* data ready */ ++ spin_unlock_irqrestore(&timer_lock, flags); ++ break; ++ default: ++ break; ++ } ++ } ++#endif ++ return IRQ_RETVAL(IRQ_HANDLED); ++} ++ ++#ifdef LIRC_ON_SA1100 ++static void send_pulse(unsigned long length) ++{ ++ unsigned long k, delay; ++ int flag; ++ ++ if (length == 0) ++ return; ++ /* this won't give us the carrier frequency we really want ++ due to integer arithmetic, but we can accept this inaccuracy */ ++ ++ for (k = flag = 0; k < length; k += delay, flag = !flag) { ++ if (flag) { ++ off(); ++ delay = space_width; ++ } else { ++ on(); ++ delay = pulse_width; ++ } ++ safe_udelay(delay); ++ } ++ off(); ++} ++ ++static void send_space(unsigned long length) ++{ ++ if (length == 0) ++ return; ++ off(); ++ safe_udelay(length); ++} ++#else ++static void send_space(unsigned long len) ++{ ++ safe_udelay(len); ++} ++ ++static void send_pulse(unsigned long len) ++{ ++ long bytes_out = len / TIME_CONST; ++ long time_left; ++ ++ time_left = (long)len - (long)bytes_out * (long)TIME_CONST; ++ if (bytes_out == 0) { ++ bytes_out++; ++ time_left = 0; ++ } ++ while (bytes_out--) { ++ outb(PULSE, io + UART_TX); ++ /* FIXME treba seriozne cakanie z char/serial.c */ ++ while (!(inb(io + UART_LSR) & UART_LSR_THRE)) ++ ; ++ } ++#if 0 ++ if (time_left > 0) ++ safe_udelay(time_left); ++#endif ++} ++#endif ++ ++#ifdef CONFIG_SA1100_COLLIE ++static inline int sa1100_irda_set_power_collie(int state) ++{ ++ if (state) { ++ /* ++ * 0 - 
off ++ * 1 - short range, lowest power ++ * 2 - medium range, medium power ++ * 3 - maximum range, high power ++ */ ++ ucb1200_set_io_direction(TC35143_GPIO_IR_ON, ++ TC35143_IODIR_OUTPUT); ++ ucb1200_set_io(TC35143_GPIO_IR_ON, TC35143_IODAT_LOW); ++ udelay(100); ++ } else { ++ /* OFF */ ++ ucb1200_set_io_direction(TC35143_GPIO_IR_ON, ++ TC35143_IODIR_OUTPUT); ++ ucb1200_set_io(TC35143_GPIO_IR_ON, TC35143_IODAT_HIGH); ++ } ++ return 0; ++} ++#endif ++ ++static int init_hardware(void) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ /* reset UART */ ++#ifdef LIRC_ON_SA1100 ++#ifdef CONFIG_SA1100_BITSY ++ if (machine_is_bitsy()) { ++ printk(KERN_INFO "Power on IR module\n"); ++ set_bitsy_egpio(EGPIO_BITSY_IR_ON); ++ } ++#endif ++#ifdef CONFIG_SA1100_COLLIE ++ sa1100_irda_set_power_collie(3); /* power on */ ++#endif ++ sr.hscr0 = Ser2HSCR0; ++ ++ sr.utcr0 = Ser2UTCR0; ++ sr.utcr1 = Ser2UTCR1; ++ sr.utcr2 = Ser2UTCR2; ++ sr.utcr3 = Ser2UTCR3; ++ sr.utcr4 = Ser2UTCR4; ++ ++ sr.utdr = Ser2UTDR; ++ sr.utsr0 = Ser2UTSR0; ++ sr.utsr1 = Ser2UTSR1; ++ ++ /* configure GPIO */ ++ /* output */ ++ PPDR |= PPC_TXD2; ++ PSDR |= PPC_TXD2; ++ /* set output to 0 */ ++ off(); ++ ++ /* ++ * Enable HP-SIR modulation, and ensure that the port is disabled. 
++ */ ++ Ser2UTCR3 = 0; ++ Ser2HSCR0 = sr.hscr0 & (~HSCR0_HSSP); ++ ++ /* clear status register to prevent unwanted interrupts */ ++ Ser2UTSR0 &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB); ++ ++ /* 7N1 */ ++ Ser2UTCR0 = UTCR0_1StpBit|UTCR0_7BitData; ++ /* 115200 */ ++ Ser2UTCR1 = 0; ++ Ser2UTCR2 = 1; ++ /* use HPSIR, 1.6 usec pulses */ ++ Ser2UTCR4 = UTCR4_HPSIR|UTCR4_Z1_6us; ++ ++ /* enable receiver, receive fifo interrupt */ ++ Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE; ++ ++ /* clear status register to prevent unwanted interrupts */ ++ Ser2UTSR0 &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB); ++ ++#elif defined(LIRC_SIR_TEKRAM) ++ /* disable FIFO */ ++ soutp(UART_FCR, ++ UART_FCR_CLEAR_RCVR| ++ UART_FCR_CLEAR_XMIT| ++ UART_FCR_TRIGGER_1); ++ ++ /* Set DLAB 0. */ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* First of all, disable all interrupts */ ++ soutp(UART_IER, sinp(UART_IER) & ++ (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); ++ ++ /* Set DLAB 1. */ ++ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB); ++ ++ /* Set divisor to 12 => 9600 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 12); ++ ++ /* Set DLAB 0. */ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* power supply */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ safe_udelay(50*1000); ++ ++ /* -DTR low -> reset PIC */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2); ++ udelay(1*1000); ++ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(100); ++ ++ ++ /* -RTS low -> send control byte */ ++ soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(7); ++ soutp(UART_TX, TEKRAM_115200|TEKRAM_PW); ++ ++ /* one byte takes ~1042 usec to transmit at 9600,8N1 */ ++ udelay(1500); ++ ++ /* back to normal operation */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(50); ++ ++ udelay(1500); ++ ++ /* read previous control byte */ ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": 0x%02x\n", sinp(UART_RX)); ++ ++ /* Set DLAB 1. 
*/ ++ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB); ++ ++ /* Set divisor to 1 => 115200 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 1); ++ ++ /* Set DLAB 0, 8 Bit */ ++ soutp(UART_LCR, UART_LCR_WLEN8); ++ /* enable interrupts */ ++ soutp(UART_IER, sinp(UART_IER)|UART_IER_RDI); ++#else ++ outb(0, io + UART_MCR); ++ outb(0, io + UART_IER); ++ /* init UART */ ++ /* set DLAB, speed = 115200 */ ++ outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR); ++ outb(1, io + UART_DLL); outb(0, io + UART_DLM); ++ /* 7N1+start = 9 bits at 115200 ~ 3 bits at 44000 */ ++ outb(UART_LCR_WLEN7, io + UART_LCR); ++ /* FIFO operation */ ++ outb(UART_FCR_ENABLE_FIFO, io + UART_FCR); ++ /* interrupts */ ++ /* outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, io + UART_IER); */ ++ outb(UART_IER_RDI, io + UART_IER); ++ /* turn on UART */ ++ outb(UART_MCR_DTR|UART_MCR_RTS|UART_MCR_OUT2, io + UART_MCR); ++#ifdef LIRC_SIR_ACTISYS_ACT200L ++ init_act200(); ++#elif defined(LIRC_SIR_ACTISYS_ACT220L) ++ init_act220(); ++#endif ++#endif ++ spin_unlock_irqrestore(&hardware_lock, flags); ++ return 0; ++} ++ ++static void drop_hardware(void) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ ++#ifdef LIRC_ON_SA1100 ++ Ser2UTCR3 = 0; ++ ++ Ser2UTCR0 = sr.utcr0; ++ Ser2UTCR1 = sr.utcr1; ++ Ser2UTCR2 = sr.utcr2; ++ Ser2UTCR4 = sr.utcr4; ++ Ser2UTCR3 = sr.utcr3; ++ ++ Ser2HSCR0 = sr.hscr0; ++#ifdef CONFIG_SA1100_BITSY ++ if (machine_is_bitsy()) ++ clr_bitsy_egpio(EGPIO_BITSY_IR_ON); ++#endif ++#ifdef CONFIG_SA1100_COLLIE ++ sa1100_irda_set_power_collie(0); /* power off */ ++#endif ++#else ++ /* turn off interrupts */ ++ outb(0, io + UART_IER); ++#endif ++ spin_unlock_irqrestore(&hardware_lock, flags); ++} ++ ++/* SECTION: Initialisation */ ++ ++static int init_port(void) ++{ ++ int retval; ++ ++ /* get I/O port access and IRQ line */ ++#ifndef LIRC_ON_SA1100 ++ if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": i/o port 0x%.4x 
already in use.\n", io); ++ return -EBUSY; ++ } ++#endif ++ retval = request_irq(irq, sir_interrupt, IRQF_DISABLED, ++ LIRC_DRIVER_NAME, NULL); ++ if (retval < 0) { ++# ifndef LIRC_ON_SA1100 ++ release_region(io, 8); ++# endif ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": IRQ %d already in use.\n", ++ irq); ++ return retval; ++ } ++#ifndef LIRC_ON_SA1100 ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": I/O port 0x%.4x, IRQ %d.\n", ++ io, irq); ++#endif ++ ++ init_timer(&timerlist); ++ timerlist.function = sir_timeout; ++ timerlist.data = 0xabadcafe; ++ ++ return 0; ++} ++ ++static void drop_port(void) ++{ ++ free_irq(irq, NULL); ++ del_timer_sync(&timerlist); ++#ifndef LIRC_ON_SA1100 ++ release_region(io, 8); ++#endif ++} ++ ++#ifdef LIRC_SIR_ACTISYS_ACT200L ++/******************************************************/ ++/* Crystal/Cirrus CS8130 IR transceiver, used in Actisys Act200L dongle */ ++/* some code borrowed from Linux IRDA driver */ ++ ++/* Regsiter 0: Control register #1 */ ++#define ACT200L_REG0 0x00 ++#define ACT200L_TXEN 0x01 /* Enable transmitter */ ++#define ACT200L_RXEN 0x02 /* Enable receiver */ ++#define ACT200L_ECHO 0x08 /* Echo control chars */ ++ ++/* Register 1: Control register #2 */ ++#define ACT200L_REG1 0x10 ++#define ACT200L_LODB 0x01 /* Load new baud rate count value */ ++#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */ ++ ++/* Register 3: Transmit mode register #2 */ ++#define ACT200L_REG3 0x30 ++#define ACT200L_B0 0x01 /* DataBits, 0=6, 1=7, 2=8, 3=9(8P) */ ++#define ACT200L_B1 0x02 /* DataBits, 0=6, 1=7, 2=8, 3=9(8P) */ ++#define ACT200L_CHSY 0x04 /* StartBit Synced 0=bittime, 1=startbit */ ++ ++/* Register 4: Output Power register */ ++#define ACT200L_REG4 0x40 ++#define ACT200L_OP0 0x01 /* Enable LED1C output */ ++#define ACT200L_OP1 0x02 /* Enable LED2C output */ ++#define ACT200L_BLKR 0x04 ++ ++/* Register 5: Receive Mode register */ ++#define ACT200L_REG5 0x50 ++#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */ ++ 
/*.. other various IRDA bit modes, and TV remote modes..*/ ++ ++/* Register 6: Receive Sensitivity register #1 */ ++#define ACT200L_REG6 0x60 ++#define ACT200L_RS0 0x01 /* receive threshold bit 0 */ ++#define ACT200L_RS1 0x02 /* receive threshold bit 1 */ ++ ++/* Register 7: Receive Sensitivity register #2 */ ++#define ACT200L_REG7 0x70 ++#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */ ++ ++/* Register 8,9: Baud Rate Dvider register #1,#2 */ ++#define ACT200L_REG8 0x80 ++#define ACT200L_REG9 0x90 ++ ++#define ACT200L_2400 0x5f ++#define ACT200L_9600 0x17 ++#define ACT200L_19200 0x0b ++#define ACT200L_38400 0x05 ++#define ACT200L_57600 0x03 ++#define ACT200L_115200 0x01 ++ ++/* Register 13: Control register #3 */ ++#define ACT200L_REG13 0xd0 ++#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */ ++ ++/* Register 15: Status register */ ++#define ACT200L_REG15 0xf0 ++ ++/* Register 21: Control register #4 */ ++#define ACT200L_REG21 0x50 ++#define ACT200L_EXCK 0x02 /* Disable clock output driver */ ++#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */ ++ ++static void init_act200(void) ++{ ++ int i; ++ __u8 control[] = { ++ ACT200L_REG15, ++ ACT200L_REG13 | ACT200L_SHDW, ++ ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL, ++ ACT200L_REG13, ++ ACT200L_REG7 | ACT200L_ENPOS, ++ ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1, ++ ACT200L_REG5 | ACT200L_RWIDL, ++ ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR, ++ ACT200L_REG3 | ACT200L_B0, ++ ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN, ++ ACT200L_REG8 | (ACT200L_115200 & 0x0f), ++ ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f), ++ ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE ++ }; ++ ++ /* Set DLAB 1. */ ++ soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN8); ++ ++ /* Set divisor to 12 => 9600 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 12); ++ ++ /* Set DLAB 0. 
*/ ++ soutp(UART_LCR, UART_LCR_WLEN8); ++ /* Set divisor to 12 => 9600 Baud */ ++ ++ /* power supply */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ for (i = 0; i < 50; i++) ++ safe_udelay(1000); ++ ++ /* Reset the dongle : set RTS low for 25 ms */ ++ soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2); ++ for (i = 0; i < 25; i++) ++ udelay(1000); ++ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(100); ++ ++ /* Clear DTR and set RTS to enter command mode */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2); ++ udelay(7); ++ ++/* send out the control register settings for 115K 7N1 SIR operation */ ++ for (i = 0; i < sizeof(control); i++) { ++ soutp(UART_TX, control[i]); ++ /* one byte takes ~1042 usec to transmit at 9600,8N1 */ ++ udelay(1500); ++ } ++ ++ /* back to normal operation */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(50); ++ ++ udelay(1500); ++ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB); ++ ++ /* Set DLAB 1. */ ++ soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN7); ++ ++ /* Set divisor to 1 => 115200 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 1); ++ ++ /* Set DLAB 0. 
*/ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* Set DLAB 0, 7 Bit */ ++ soutp(UART_LCR, UART_LCR_WLEN7); ++ ++ /* enable interrupts */ ++ soutp(UART_IER, sinp(UART_IER)|UART_IER_RDI); ++} ++#endif ++ ++#ifdef LIRC_SIR_ACTISYS_ACT220L ++/* Derived from linux IrDA driver (net/irda/actisys.c) ++ * Drop me a mail for any kind of comment: maxx@spaceboyz.net */ ++ ++void init_act220(void) ++{ ++ int i; ++ ++ /* DLAB 1 */ ++ soutp(UART_LCR, UART_LCR_DLAB|UART_LCR_WLEN7); ++ ++ /* 9600 baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 12); ++ ++ /* DLAB 0 */ ++ soutp(UART_LCR, UART_LCR_WLEN7); ++ ++ /* reset the dongle, set DTR low for 10us */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2); ++ udelay(10); ++ ++ /* back to normal (still 9600) */ ++ soutp(UART_MCR, UART_MCR_DTR|UART_MCR_RTS|UART_MCR_OUT2); ++ ++ /* send RTS pulses until we reach 115200 ++ * i hope this is really the same for act220l/act220l+ */ ++ for (i = 0; i < 3; i++) { ++ udelay(10); ++ /* set RTS low for 10 us */ ++ soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(10); ++ /* set RTS high for 10 us */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ } ++ ++ /* back to normal operation */ ++ udelay(1500); /* better safe than sorry ;) */ ++ ++ /* Set DLAB 1. 
*/ ++ soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN7); ++ ++ /* Set divisor to 1 => 115200 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 1); ++ ++ /* Set DLAB 0, 7 Bit */ ++ /* The dongle doesn't seem to have any problems with operation ++ at 7N1 */ ++ soutp(UART_LCR, UART_LCR_WLEN7); ++ ++ /* enable interrupts */ ++ soutp(UART_IER, UART_IER_RDI); ++} ++#endif ++ ++static int init_lirc_sir(void) ++{ ++ int retval; ++ ++ init_waitqueue_head(&lirc_read_queue); ++ retval = init_port(); ++ if (retval < 0) ++ return retval; ++ init_hardware(); ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": Installed.\n"); ++ return 0; ++} ++ ++#ifdef MODULE ++ ++static int __init lirc_sir_init(void) ++{ ++ int retval; ++ ++ retval = init_chrdev(); ++ if (retval < 0) ++ return retval; ++ retval = init_lirc_sir(); ++ if (retval) { ++ drop_chrdev(); ++ return retval; ++ } ++ return 0; ++} ++ ++static void __exit lirc_sir_exit(void) ++{ ++ drop_hardware(); ++ drop_chrdev(); ++ drop_port(); ++ printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n"); ++} ++ ++module_init(lirc_sir_init); ++module_exit(lirc_sir_exit); ++ ++#ifdef LIRC_SIR_TEKRAM ++MODULE_DESCRIPTION("Infrared receiver driver for Tekram Irmate 210"); ++MODULE_AUTHOR("Christoph Bartelmus"); ++#elif defined(LIRC_ON_SA1100) ++MODULE_DESCRIPTION("LIRC driver for StrongARM SA1100 embedded microprocessor"); ++MODULE_AUTHOR("Christoph Bartelmus"); ++#elif defined(LIRC_SIR_ACTISYS_ACT200L) ++MODULE_DESCRIPTION("LIRC driver for Actisys Act200L"); ++MODULE_AUTHOR("Karl Bongers"); ++#elif defined(LIRC_SIR_ACTISYS_ACT220L) ++MODULE_DESCRIPTION("LIRC driver for Actisys Act220L(+)"); ++MODULE_AUTHOR("Jan Roemisch"); ++#else ++MODULE_DESCRIPTION("Infrared receiver driver for SIR type serial ports"); ++MODULE_AUTHOR("Milan Pikula"); ++#endif ++MODULE_LICENSE("GPL"); ++ ++#ifdef LIRC_ON_SA1100 ++module_param(irq, int, 0444); ++MODULE_PARM_DESC(irq, "Interrupt (16)"); ++#else ++module_param(io, int, 0444); ++MODULE_PARM_DESC(io, "I/O address base 
(0x3f8 or 0x2f8)"); ++ ++module_param(irq, int, 0444); ++MODULE_PARM_DESC(irq, "Interrupt (4 or 3)"); ++ ++module_param(threshold, int, 0444); ++MODULE_PARM_DESC(threshold, "space detection threshold (3)"); ++#endif ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++#endif /* MODULE */ +diff --git a/drivers/input/lirc/lirc_streamzap.c b/drivers/input/lirc/lirc_streamzap.c +new file mode 100644 +index 0000000..69865cb +--- /dev/null ++++ b/drivers/input/lirc/lirc_streamzap.c +@@ -0,0 +1,795 @@ ++/* ++ * Streamzap Remote Control driver ++ * ++ * Copyright (c) 2005 Christoph Bartelmus ++ * ++ * This driver was based on the work of Greg Wickham and Adrian ++ * Dewhurst. It was substantially rewritten to support correct signal ++ * gaps and now maintains a delay buffer, which is used to present ++ * consistent timing behaviour to user space applications. Without the ++ * delay buffer an ugly hack would be required in lircd, which can ++ * cause sluggish signal decoding in certain situations. ++ * ++ * This driver is based on the USB skeleton driver packaged with the ++ * kernel; copyright (C) 2001-2003 Greg Kroah-Hartman (greg@kroah.com) ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++#define DRIVER_VERSION "1.28" ++#define DRIVER_NAME "lirc_streamzap" ++#define DRIVER_DESC "Streamzap Remote Control driver" ++ ++/* ------------------------------------------------------------------ */ ++ ++static int debug; ++ ++#define USB_STREAMZAP_VENDOR_ID 0x0e9c ++#define USB_STREAMZAP_PRODUCT_ID 0x0000 ++ ++/* Use our own dbg macro */ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG DRIVER_NAME "[%d]: " \ ++ fmt "\n", ## args); \ ++ } while (0) ++ ++/* ++ * table of devices that work with this driver ++ */ ++static struct usb_device_id streamzap_table[] = { ++ /* Streamzap Remote Control */ ++ { USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) }, ++ /* Terminating entry */ ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(usb, streamzap_table); ++ ++#define STREAMZAP_PULSE_MASK 0xf0 ++#define STREAMZAP_SPACE_MASK 0x0f ++#define STREAMZAP_RESOLUTION 256 ++ ++/* number of samples buffered */ ++#define STREAMZAP_BUFFER_SIZE 128 ++ ++enum StreamzapDecoderState { ++ PulseSpace, ++ FullPulse, ++ FullSpace, ++ IgnorePulse ++}; ++ ++/* Structure to hold all of our device specific stuff */ ++/* some remarks regarding locking: ++ theoretically this struct can be accessed from three threads: ++ ++ - from lirc_dev through set_use_inc/set_use_dec ++ ++ - from the USB layer throuh probe/disconnect/irq ++ ++ Careful placement of lirc_register_plugin/lirc_unregister_plugin ++ calls will prevent conflicts. lirc_dev makes sure that ++ set_use_inc/set_use_dec are not being executed and will not be ++ called after lirc_unregister_plugin returns. 
++ ++ - by the timer callback ++ ++ The timer is only running when the device is connected and the ++ LIRC device is open. Making sure the timer is deleted by ++ set_use_dec will make conflicts impossible. ++*/ ++struct usb_streamzap { ++ ++ /* usb */ ++ /* save off the usb device pointer */ ++ struct usb_device *udev; ++ /* the interface for this device */ ++ struct usb_interface *interface; ++ ++ /* buffer & dma */ ++ unsigned char *buf_in; ++ dma_addr_t dma_in; ++ unsigned int buf_in_len; ++ ++ struct usb_endpoint_descriptor *endpoint; ++ ++ /* IRQ */ ++ struct urb *urb_in; ++ ++ /* lirc */ ++ struct lirc_plugin plugin; ++ struct lirc_buffer delay_buf; ++ struct lirc_buffer lirc_buf; ++ ++ /* timer used to support delay buffering */ ++ struct timer_list delay_timer; ++ int timer_running; ++ spinlock_t timer_lock; ++ ++ /* tracks whether we are currently receiving some signal */ ++ int idle; ++ /* sum of signal lengths received since signal start */ ++ unsigned long sum; ++ /* start time of signal; necessary for gap tracking */ ++ struct timeval signal_last; ++ struct timeval signal_start; ++ enum StreamzapDecoderState decoder_state; ++ struct timer_list flush_timer; ++ int flush; ++ int in_use; ++}; ++ ++ ++/* local function prototypes */ ++static int streamzap_probe(struct usb_interface *interface, ++ const struct usb_device_id *id); ++static void streamzap_disconnect(struct usb_interface *interface); ++static void usb_streamzap_irq(struct urb *urb); ++static int streamzap_use_inc(void *data); ++static void streamzap_use_dec(void *data); ++static int streamzap_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg); ++static int streamzap_suspend(struct usb_interface *intf, pm_message_t message); ++static int streamzap_resume(struct usb_interface *intf); ++ ++/* usb specific object needed to register this driver with the usb subsystem */ ++ ++static struct usb_driver streamzap_driver = { ++ .name = DRIVER_NAME, ++ .probe = 
streamzap_probe, ++ .disconnect = streamzap_disconnect, ++ .suspend = streamzap_suspend, ++ .resume = streamzap_resume, ++ .id_table = streamzap_table, ++}; ++ ++static void stop_timer(struct usb_streamzap *sz) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&sz->timer_lock, flags); ++ if (sz->timer_running) { ++ sz->timer_running = 0; ++ del_timer_sync(&sz->delay_timer); ++ } ++ spin_unlock_irqrestore(&sz->timer_lock, flags); ++} ++ ++static void flush_timeout(unsigned long arg) ++{ ++ struct usb_streamzap *sz = (struct usb_streamzap *) arg; ++ ++ /* finally start accepting data */ ++ sz->flush = 0; ++} ++static void delay_timeout(unsigned long arg) ++{ ++ unsigned long flags; ++ /* deliver data every 10 ms */ ++ static unsigned long timer_inc = ++ (10000/(1000000/HZ)) == 0 ? 1 : (10000/(1000000/HZ)); ++ struct usb_streamzap *sz = (struct usb_streamzap *) arg; ++ int data; ++ ++ spin_lock_irqsave(&sz->timer_lock, flags); ++ ++ if (!lirc_buffer_empty(&sz->delay_buf) && ++ !lirc_buffer_full(&sz->lirc_buf)) { ++ lirc_buffer_read_1(&sz->delay_buf, (unsigned char *) &data); ++ lirc_buffer_write_1(&sz->lirc_buf, (unsigned char *) &data); ++ } ++ if (!lirc_buffer_empty(&sz->delay_buf)) { ++ while (lirc_buffer_available(&sz->delay_buf) < ++ STREAMZAP_BUFFER_SIZE/2 && ++ !lirc_buffer_full(&sz->lirc_buf)) { ++ lirc_buffer_read_1(&sz->delay_buf, ++ (unsigned char *) &data); ++ lirc_buffer_write_1(&sz->lirc_buf, ++ (unsigned char *) &data); ++ } ++ if (sz->timer_running) { ++ sz->delay_timer.expires += timer_inc; ++ add_timer(&sz->delay_timer); ++ } ++ } else { ++ sz->timer_running = 0; ++ } ++ ++ if (!lirc_buffer_empty(&sz->lirc_buf)) ++ wake_up(&sz->lirc_buf.wait_poll); ++ ++ spin_unlock_irqrestore(&sz->timer_lock, flags); ++} ++ ++static inline void flush_delay_buffer(struct usb_streamzap *sz) ++{ ++ int data; ++ int empty = 1; ++ ++ while (!lirc_buffer_empty(&sz->delay_buf)) { ++ empty = 0; ++ lirc_buffer_read_1(&sz->delay_buf, (unsigned char *) &data); ++ if 
(!lirc_buffer_full(&sz->lirc_buf)) { ++ lirc_buffer_write_1(&sz->lirc_buf, ++ (unsigned char *) &data); ++ } else { ++ dprintk("buffer overflow\n", sz->plugin.minor); ++ } ++ } ++ if (!empty) ++ wake_up(&sz->lirc_buf.wait_poll); ++} ++ ++static inline void push(struct usb_streamzap *sz, unsigned char *data) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&sz->timer_lock, flags); ++ if (lirc_buffer_full(&sz->delay_buf)) { ++ int data; ++ ++ lirc_buffer_read_1(&sz->delay_buf, (unsigned char *) &data); ++ if (!lirc_buffer_full(&sz->lirc_buf)) { ++ lirc_buffer_write_1(&sz->lirc_buf, ++ (unsigned char *) &data); ++ } else { ++ dprintk("buffer overflow", sz->plugin.minor); ++ } ++ } ++ ++ lirc_buffer_write_1(&sz->delay_buf, data); ++ ++ if (!sz->timer_running) { ++ sz->delay_timer.expires = jiffies + HZ/10; ++ add_timer(&sz->delay_timer); ++ sz->timer_running = 1; ++ } ++ ++ spin_unlock_irqrestore(&sz->timer_lock, flags); ++} ++ ++static inline void push_full_pulse(struct usb_streamzap *sz, ++ unsigned char value) ++{ ++ int pulse; ++ ++ if (sz->idle) { ++ long deltv; ++ int tmp; ++ ++ sz->signal_last = sz->signal_start; ++ do_gettimeofday(&sz->signal_start); ++ ++ deltv = sz->signal_start.tv_sec-sz->signal_last.tv_sec; ++ if (deltv > 15) { ++ tmp = PULSE_MASK; /* really long time */ ++ } else { ++ tmp = (int) (deltv*1000000+ ++ sz->signal_start.tv_usec - ++ sz->signal_last.tv_usec); ++ tmp -= sz->sum; ++ } ++ dprintk("ls %u", sz->plugin.minor, tmp); ++ push(sz, (char *)&tmp); ++ ++ sz->idle = 0; ++ sz->sum = 0; ++ } ++ ++ pulse = ((int) value)*STREAMZAP_RESOLUTION; ++ pulse += STREAMZAP_RESOLUTION/2; ++ sz->sum += pulse; ++ pulse |= PULSE_BIT; ++ ++ dprintk("p %u", sz->plugin.minor, pulse&PULSE_MASK); ++ push(sz, (char *)&pulse); ++} ++ ++static inline void push_half_pulse(struct usb_streamzap *sz, ++ unsigned char value) ++{ ++ push_full_pulse(sz, (value & STREAMZAP_PULSE_MASK)>>4); ++} ++ ++static inline void push_full_space(struct usb_streamzap *sz, ++ unsigned 
char value) ++{ ++ int space; ++ ++ space = ((int) value)*STREAMZAP_RESOLUTION; ++ space += STREAMZAP_RESOLUTION/2; ++ sz->sum += space; ++ dprintk("s %u", sz->plugin.minor, space); ++ push(sz, (char *)&space); ++} ++ ++static inline void push_half_space(struct usb_streamzap *sz, ++ unsigned char value) ++{ ++ push_full_space(sz, value & STREAMZAP_SPACE_MASK); ++} ++ ++/* ++ * usb_streamzap_irq - IRQ handler ++ * ++ * This procedure is invoked on reception of data from ++ * the usb remote. ++ */ ++static void usb_streamzap_irq(struct urb *urb) ++{ ++ struct usb_streamzap *sz; ++ int len; ++ unsigned int i = 0; ++ ++ if (!urb) ++ return; ++ ++ sz = urb->context; ++ len = urb->actual_length; ++ ++ switch (urb->status) { ++ case -ECONNRESET: ++ case -ENOENT: ++ case -ESHUTDOWN: ++ /* this urb is terminated, clean up */ ++ /* sz might already be invalid at this point */ ++ dprintk("urb status: %d", -1, urb->status); ++ return; ++ default: ++ break; ++ } ++ ++ dprintk("received %d", sz->plugin.minor, urb->actual_length); ++ if (!sz->flush) { ++ for (i = 0; i < urb->actual_length; i++) { ++ dprintk("%d: %x", sz->plugin.minor, ++ i, (unsigned char) sz->buf_in[i]); ++ switch (sz->decoder_state) { ++ case PulseSpace: ++ if ((sz->buf_in[i]&STREAMZAP_PULSE_MASK) == ++ STREAMZAP_PULSE_MASK) { ++ sz->decoder_state = FullPulse; ++ continue; ++ } else if ((sz->buf_in[i]&STREAMZAP_SPACE_MASK) ++ == STREAMZAP_SPACE_MASK) { ++ push_half_pulse(sz, sz->buf_in[i]); ++ sz->decoder_state = FullSpace; ++ continue; ++ } else { ++ push_half_pulse(sz, sz->buf_in[i]); ++ push_half_space(sz, sz->buf_in[i]); ++ } ++ break; ++ case FullPulse: ++ push_full_pulse(sz, sz->buf_in[i]); ++ sz->decoder_state = IgnorePulse; ++ break; ++ case FullSpace: ++ if (sz->buf_in[i] == 0xff) { ++ sz->idle = 1; ++ stop_timer(sz); ++ flush_delay_buffer(sz); ++ } else ++ push_full_space(sz, sz->buf_in[i]); ++ sz->decoder_state = PulseSpace; ++ break; ++ case IgnorePulse: ++ if ((sz->buf_in[i]&STREAMZAP_SPACE_MASK) 
== ++ STREAMZAP_SPACE_MASK) { ++ sz->decoder_state = FullSpace; ++ continue; ++ } ++ push_half_space(sz, sz->buf_in[i]); ++ sz->decoder_state = PulseSpace; ++ break; ++ } ++ } ++ } ++ ++ /* resubmit only for 2.6 */ ++ usb_submit_urb(urb, GFP_ATOMIC); ++ ++ return; ++} ++ ++/** ++ * streamzap_probe ++ * ++ * Called by usb-core to associated with a candidate device ++ * On any failure the return value is the ERROR ++ * On success return 0 ++ */ ++static int streamzap_probe(struct usb_interface *interface, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *udev = interface_to_usbdev(interface); ++ struct usb_host_interface *iface_host; ++ int retval = -ENOMEM; ++ struct usb_streamzap *sz = NULL; ++ char buf[63], name[128] = ""; ++ ++ /*************************************************** ++ * Allocate space for device driver specific data ++ */ ++ sz = kmalloc(sizeof(struct usb_streamzap), GFP_KERNEL); ++ if (sz == NULL) ++ goto error; ++ ++ memset(sz, 0, sizeof(*sz)); ++ sz->udev = udev; ++ sz->interface = interface; ++ ++ /*************************************************** ++ * Check to ensure endpoint information matches requirements ++ */ ++ iface_host = interface->cur_altsetting; ++ ++ if (iface_host->desc.bNumEndpoints != 1) { ++ err("%s: Unexpected desc.bNumEndpoints (%d)", __func__, ++ iface_host->desc.bNumEndpoints); ++ retval = -ENODEV; ++ goto error; ++ } ++ ++ sz->endpoint = &(iface_host->endpoint[0].desc); ++ if ((sz->endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ++ != USB_DIR_IN) { ++ err("%s: endpoint doesn't match input device 02%02x", ++ __func__, sz->endpoint->bEndpointAddress); ++ retval = -ENODEV; ++ goto error; ++ } ++ ++ if ((sz->endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ != USB_ENDPOINT_XFER_INT) { ++ err("%s: endpoint attributes don't match xfer 02%02x", ++ __func__, sz->endpoint->bmAttributes); ++ retval = -ENODEV; ++ goto error; ++ } ++ ++ if (sz->endpoint->wMaxPacketSize == 0) { ++ err("%s: endpoint message 
size==0? ", __func__); ++ retval = -ENODEV; ++ goto error; ++ } ++ ++ /*************************************************** ++ * Allocate the USB buffer and IRQ URB ++ */ ++ ++ sz->buf_in_len = sz->endpoint->wMaxPacketSize; ++ sz->buf_in = usb_buffer_alloc(sz->udev, sz->buf_in_len, ++ GFP_ATOMIC, &sz->dma_in); ++ if (sz->buf_in == NULL) ++ goto error; ++ ++ sz->urb_in = usb_alloc_urb(0, GFP_KERNEL); ++ if (sz->urb_in == NULL) ++ goto error; ++ ++ /*************************************************** ++ * Connect this device to the LIRC sub-system ++ */ ++ ++ if (lirc_buffer_init(&sz->lirc_buf, sizeof(int), ++ STREAMZAP_BUFFER_SIZE)) ++ goto error; ++ ++ if (lirc_buffer_init(&sz->delay_buf, sizeof(int), ++ STREAMZAP_BUFFER_SIZE)) { ++ lirc_buffer_free(&sz->lirc_buf); ++ goto error; ++ } ++ ++ /*************************************************** ++ * As required memory is allocated now populate the plugin structure ++ */ ++ ++ memset(&sz->plugin, 0, sizeof(sz->plugin)); ++ ++ strcpy(sz->plugin.name, DRIVER_NAME); ++ sz->plugin.minor = -1; ++ sz->plugin.sample_rate = 0; ++ sz->plugin.code_length = sizeof(int) * 8; ++ sz->plugin.features = LIRC_CAN_REC_MODE2 | LIRC_CAN_GET_REC_RESOLUTION; ++ sz->plugin.data = sz; ++ sz->plugin.rbuf = &sz->lirc_buf; ++ sz->plugin.set_use_inc = &streamzap_use_inc; ++ sz->plugin.set_use_dec = &streamzap_use_dec; ++ sz->plugin.ioctl = streamzap_ioctl; ++ sz->plugin.dev = &udev->dev; ++ sz->plugin.owner = THIS_MODULE; ++ ++ sz->idle = 1; ++ sz->decoder_state = PulseSpace; ++ init_timer(&sz->delay_timer); ++ sz->delay_timer.function = delay_timeout; ++ sz->delay_timer.data = (unsigned long) sz; ++ sz->timer_running = 0; ++ spin_lock_init(&sz->timer_lock); ++ ++ init_timer(&sz->flush_timer); ++ sz->flush_timer.function = flush_timeout; ++ sz->flush_timer.data = (unsigned long) sz; ++ /*************************************************** ++ * Complete final initialisations ++ */ ++ ++ usb_fill_int_urb(sz->urb_in, udev, ++ usb_rcvintpipe(udev, 
sz->endpoint->bEndpointAddress), ++ sz->buf_in, sz->buf_in_len, usb_streamzap_irq, sz, ++ sz->endpoint->bInterval); ++ ++ if (udev->descriptor.iManufacturer ++ && usb_string(udev, udev->descriptor.iManufacturer, buf, 63) > 0) ++ strncpy(name, buf, 128); ++ ++ if (udev->descriptor.iProduct ++ && usb_string(udev, udev->descriptor.iProduct, buf, 63) > 0) ++ snprintf(name, 128, "%s %s", name, buf); ++ ++ printk(KERN_INFO DRIVER_NAME "[%d]: %s on usb%d:%d attached\n", ++ sz->plugin.minor, name, ++ udev->bus->busnum, sz->udev->devnum); ++ ++ usb_set_intfdata(interface, sz); ++ ++ if (lirc_register_plugin(&sz->plugin) < 0) { ++ lirc_buffer_free(&sz->delay_buf); ++ lirc_buffer_free(&sz->lirc_buf); ++ goto error; ++ } ++ ++ return 0; ++ ++error: ++ ++ /*************************************************** ++ * Premise is that a 'goto error' can be invoked from inside the ++ * probe function and all necessary cleanup actions will be taken ++ * including freeing any necessary memory blocks ++ */ ++ ++ if (retval == -ENOMEM) ++ err("Out of memory"); ++ ++ if (sz) { ++ usb_free_urb(sz->urb_in); ++ usb_buffer_free(udev, sz->buf_in_len, sz->buf_in, sz->dma_in); ++ kfree(sz); ++ } ++ ++ return retval; ++} ++ ++static int streamzap_use_inc(void *data) ++{ ++ struct usb_streamzap *sz = data; ++ ++ if (!sz) { ++ dprintk("%s called with no context", -1, __func__); ++ return -EINVAL; ++ } ++ dprintk("set use inc", sz->plugin.minor); ++ ++ while (!lirc_buffer_empty(&sz->lirc_buf)) ++ lirc_buffer_remove_1(&sz->lirc_buf); ++ while (!lirc_buffer_empty(&sz->delay_buf)) ++ lirc_buffer_remove_1(&sz->delay_buf); ++ ++ sz->flush_timer.expires = jiffies + HZ; ++ sz->flush = 1; ++ add_timer(&sz->flush_timer); ++ ++ sz->urb_in->dev = sz->udev; ++ if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) { ++ dprintk("open result = -EIO error submitting urb", ++ sz->plugin.minor); ++ return -EIO; ++ } ++ sz->in_use++; ++ ++ return 0; ++} ++ ++static void streamzap_use_dec(void *data) ++{ ++ struct usb_streamzap 
*sz = data; ++ ++ if (!sz) { ++ dprintk("%s called with no context", -1, __func__); ++ return; ++ } ++ dprintk("set use dec", sz->plugin.minor); ++ ++ if (sz->flush) { ++ sz->flush = 0; ++ del_timer_sync(&sz->flush_timer); ++ } ++ ++ stop_timer(sz); ++ ++ usb_kill_urb(sz->urb_in); ++ ++ sz->in_use--; ++} ++ ++static int streamzap_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg) ++{ ++ int result; ++ ++ switch (cmd) { ++ case LIRC_GET_REC_RESOLUTION: ++ result = put_user(STREAMZAP_RESOLUTION, (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ default: ++ return -ENOIOCTLCMD; ++ } ++ return 0; ++} ++ ++/** ++ * streamzap_disconnect ++ * ++ * Called by the usb core when the device is removed from the system. ++ * ++ * This routine guarantees that the driver will not submit any more urbs ++ * by clearing dev->udev. It is also supposed to terminate any currently ++ * active urbs. Unfortunately, usb_bulk_msg(), used in streamzap_read(), ++ * does not provide any way to do this. 
++ */ ++static void streamzap_disconnect(struct usb_interface *interface) ++{ ++ struct usb_streamzap *sz; ++ int errnum; ++ int minor; ++ ++ sz = usb_get_intfdata(interface); ++ ++ /* ++ * unregister from the LIRC sub-system ++ */ ++ ++ errnum = lirc_unregister_plugin(sz->plugin.minor); ++ if (errnum != 0) ++ dprintk("error in lirc_unregister: (returned %d)", ++ sz->plugin.minor, errnum); ++ ++ lirc_buffer_free(&sz->delay_buf); ++ lirc_buffer_free(&sz->lirc_buf); ++ ++ /* ++ * unregister from the USB sub-system ++ */ ++ ++ usb_free_urb(sz->urb_in); ++ ++ usb_buffer_free(sz->udev, sz->buf_in_len, sz->buf_in, sz->dma_in); ++ ++ minor = sz->plugin.minor; ++ kfree(sz); ++ ++ printk(KERN_INFO DRIVER_NAME "[%d]: disconnected\n", minor); ++} ++ ++static int streamzap_suspend(struct usb_interface *intf, pm_message_t message) ++{ ++ struct usb_streamzap *sz = usb_get_intfdata(intf); ++ ++ printk(DRIVER_NAME "[%d]: suspend\n", sz->plugin.minor); ++ if (sz->in_use) { ++ if (sz->flush) { ++ sz->flush = 0; ++ del_timer_sync(&sz->flush_timer); ++ } ++ ++ stop_timer(sz); ++ ++ usb_kill_urb(sz->urb_in); ++ } ++ return 0; ++} ++ ++static int streamzap_resume(struct usb_interface *intf) ++{ ++ struct usb_streamzap *sz = usb_get_intfdata(intf); ++ ++ while (!lirc_buffer_empty(&sz->lirc_buf)) ++ lirc_buffer_remove_1(&sz->lirc_buf); ++ while (!lirc_buffer_empty(&sz->delay_buf)) ++ lirc_buffer_remove_1(&sz->delay_buf); ++ ++ if (sz->in_use) { ++ sz->flush_timer.expires = jiffies + HZ; ++ sz->flush = 1; ++ add_timer(&sz->flush_timer); ++ ++ sz->urb_in->dev = sz->udev; ++ if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) { ++ dprintk("open result = -EIO error submitting urb", ++ sz->plugin.minor); ++ return -EIO; ++ } ++ } ++ return 0; ++} ++ ++#ifdef MODULE ++ ++/** ++ * usb_streamzap_init ++ */ ++static int __init usb_streamzap_init(void) ++{ ++ int result; ++ ++ /* register this driver with the USB subsystem */ ++ ++ result = usb_register(&streamzap_driver); ++ ++ if (result) { ++ 
err("usb_register failed. Error number %d", ++ result); ++ return result; ++ } ++ ++ printk(KERN_INFO DRIVER_NAME " " DRIVER_VERSION " registered\n"); ++ return 0; ++} ++ ++/** ++ * usb_streamzap_exit ++ */ ++static void __exit usb_streamzap_exit(void) ++{ ++ /* deregister this driver with the USB subsystem */ ++ usb_deregister(&streamzap_driver); ++} ++ ++ ++module_init(usb_streamzap_init); ++module_exit(usb_streamzap_exit); ++ ++MODULE_AUTHOR("Christoph Bartelmus, Greg Wickham, Adrian Dewhurst"); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL"); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++#endif /* MODULE */ +diff --git a/drivers/input/lirc/lirc_ttusbir.c b/drivers/input/lirc/lirc_ttusbir.c +new file mode 100644 +index 0000000..9ed9c7b +--- /dev/null ++++ b/drivers/input/lirc/lirc_ttusbir.c +@@ -0,0 +1,400 @@ ++/**************************************************************************** ++ ** lirc_ttusbir.c *********************************************************** ++ **************************************************************************** ++ * ++ * lirc_ttusbir - LIRC device driver for the TechnoTrend USB IR Receiver ++ * ++ * Copyright (C) 2007 Stefan Macher ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++/* This LIRC driver provides access to the TechnoTrend USB IR Receiver. ++ * The receiver delivers the IR signal as raw sampled true/false data in ++ * isochronous USB packets each of size 128 byte. ++ * Currently the driver reduces the sampling rate by factor of 8 as this ++ * is still more than enough to decode RC-5 - others should be analyzed. ++ * But the driver does not rely on RC-5 it should be able to decode every ++ * IR signal that is not too fast. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lirc.h" ++#include "lirc_dev.h" ++ ++MODULE_DESCRIPTION("TechnoTrend USB IR device driver for LIRC"); ++MODULE_AUTHOR("Stefan Macher (st_maker-lirc@yahoo.de)"); ++MODULE_LICENSE("GPL"); ++ ++/* #define DEBUG */ ++#ifdef DEBUG ++#define DPRINTK printk ++#else ++#define DPRINTK(_x_, a...) ++#endif ++ ++/* function declarations */ ++static int probe(struct usb_interface *intf, const struct usb_device_id *id); ++static void disconnect(struct usb_interface *intf); ++static void urb_complete(struct urb *urb); ++static int set_use_inc(void *data); ++static void set_use_dec(void *data); ++ ++static int num_urbs = 2; ++module_param(num_urbs, int, 0444); ++MODULE_PARM_DESC(num_urbs, ++ "Number of URBs in queue. 
Try to increase to 4 in case " ++ "of problems (default: 2; minimum: 2)"); ++ ++/* table of devices that work with this driver */ ++static struct usb_device_id device_id_table[] = { ++ /* TechnoTrend USB IR Receiver */ ++ { USB_DEVICE(0x0B48, 0x2003) }, ++ /* Terminating entry */ ++ { } ++}; ++MODULE_DEVICE_TABLE(usb, device_id_table); ++ ++/* USB driver definition */ ++static struct usb_driver driver = { ++ .name = "TTUSBIR", ++ .id_table = &(device_id_table[0]), ++ .probe = probe, ++ .disconnect = disconnect, ++}; ++ ++/* USB device definition */ ++struct ttusbir_device { ++ struct usb_driver *driver; ++ struct usb_device *udev; ++ struct usb_interface *interf; ++ struct usb_class_driver class_driver; ++ unsigned int ifnum; /* Interface number to use */ ++ unsigned int alt_setting; /* alternate setting to use */ ++ unsigned int endpoint; /* Endpoint to use */ ++ struct urb **urb; /* num_urb URB pointers*/ ++ char **buffer; /* 128 byte buffer for each URB */ ++ struct lirc_buffer rbuf; /* Buffer towards LIRC */ ++ struct lirc_plugin plugin; ++ int minor; ++ int last_pulse; /* remembers if last received byte was pulse or space */ ++ int last_num; /* remembers how many last bytes appeared */ ++ int opened; ++}; ++ ++/************************************* ++ * LIRC specific functions ++ */ ++static int set_use_inc(void *data) ++{ ++ int i; ++ struct ttusbir_device *ttusbir = data; ++ ++ DPRINTK("Sending first URBs\n"); ++ /* @TODO Do I need to check if I am already opened */ ++ ttusbir->opened = 1; ++ ++ for (i = 0; i < num_urbs; i++) ++ usb_submit_urb(ttusbir->urb[i], GFP_KERNEL); ++ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++ struct ttusbir_device *ttusbir = data; ++ ++ DPRINTK("Device closed\n"); ++ ++ ttusbir->opened = 0; ++} ++ ++/************************************* ++ * USB specific functions ++ */ ++ ++/* This mapping table is used to do a very simple filtering of the ++ * input signal. 
++ * For a value with at least 4 bits set it returns 0xFF otherwise ++ * 0x00. For faster IR signals this can not be used. But for RC-5 we ++ * still have about 14 samples per pulse/space, i.e. we sample with 14 ++ * times higher frequency than the signal frequency */ ++const unsigned char map_table[] = ++{ ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF ++}; ++ ++static void urb_complete(struct urb *urb) ++{ ++ 
struct ttusbir_device *ttusbir; ++ unsigned char *buf; ++ int i; ++ int l; ++ ++ ttusbir = urb->context; ++ ++ if (!ttusbir->opened) ++ return; ++ ++ buf = (unsigned char *)urb->transfer_buffer; ++ ++ for (i = 0; i < 128; i++) { ++ /* Here we do the filtering and some kind of down sampling */ ++ buf[i] = ~map_table[buf[i]]; ++ if (ttusbir->last_pulse == buf[i]) { ++ if (ttusbir->last_num < PULSE_MASK/63) ++ ttusbir->last_num++; ++ /* else we are in a idle period and do not need to ++ * increment any longer */ ++ } else { ++ l = ttusbir->last_num * 62; /* about 62 = us/byte */ ++ if (ttusbir->last_pulse) /* pulse or space? */ ++ l |= PULSE_BIT; ++ if (!lirc_buffer_full(&ttusbir->rbuf)) { ++ lirc_buffer_write_1(&ttusbir->rbuf, (void *)&l); ++ wake_up_interruptible(&ttusbir->rbuf.wait_poll); ++ } ++ ttusbir->last_num = 0; ++ ttusbir->last_pulse = buf[i]; ++ } ++ } ++ usb_submit_urb(urb, GFP_ATOMIC); /* keep data rolling :-) */ ++} ++ ++/* Called whenever the USB subsystem thinks we could be the right driver ++ to handle this device ++*/ ++static int probe(struct usb_interface *intf, const struct usb_device_id *id) ++{ ++ int alt_set, endp; ++ int found = 0; ++ int i, j; ++ int struct_size; ++ struct usb_host_interface *host_interf; ++ struct usb_interface_descriptor *interf_desc; ++ struct usb_host_endpoint *host_endpoint; ++ struct ttusbir_device *ttusbir; ++ ++ DPRINTK("Module ttusbir probe\n"); ++ ++ /* To reduce memory fragmentation we use only one allocation */ ++ struct_size = sizeof(struct ttusbir_device) + ++ (sizeof(struct urb *) * num_urbs) + ++ (sizeof(char *) * num_urbs) + ++ (num_urbs * 128); ++ ttusbir = kmalloc(struct_size, GFP_KERNEL); ++ if (!ttusbir) ++ return -ENOMEM; ++ memset(ttusbir, 0, struct_size); ++ ++ ttusbir->urb = (struct urb **)((char *)ttusbir + ++ sizeof(struct ttusbir_device)); ++ ttusbir->buffer = (char **)((char *)ttusbir->urb + ++ (sizeof(struct urb *) * num_urbs)); ++ for (i = 0; i < num_urbs; i++) ++ ttusbir->buffer[i] = (char 
*)ttusbir->buffer + ++ (sizeof(char *)*num_urbs) + (i * 128); ++ ++ ttusbir->driver = &driver; ++ ttusbir->alt_setting = -1; ++ /* @TODO check if error can be returned */ ++ ttusbir->udev = usb_get_dev(interface_to_usbdev(intf)); ++ ttusbir->interf = intf; ++ ttusbir->last_pulse = 0x00; ++ ttusbir->last_num = 0; ++ ++ /* Now look for interface setting we can handle ++ We are searching for the alt setting where end point ++ 0x82 has max packet size 16 ++ */ ++ for (alt_set = 0; alt_set < intf->num_altsetting && !found; alt_set++) { ++ host_interf = &intf->altsetting[alt_set]; ++ interf_desc = &host_interf->desc; ++ for (endp = 0; endp < interf_desc->bNumEndpoints; endp++) { ++ host_endpoint = &host_interf->endpoint[endp]; ++ if ((host_endpoint->desc.bEndpointAddress == 0x82) && ++ (host_endpoint->desc.wMaxPacketSize == 0x10)) { ++ ttusbir->alt_setting = alt_set; ++ ttusbir->endpoint = endp; ++ found = 1; ++ break; ++ } ++ } ++ } ++ if (ttusbir->alt_setting != -1) ++ DPRINTK("alt setting: %d\n", ttusbir->alt_setting); ++ else { ++ err("Could not find alternate setting\n"); ++ kfree(ttusbir); ++ return -EINVAL; ++ } ++ ++ /* OK lets setup this interface setting */ ++ usb_set_interface(ttusbir->udev, 0, ttusbir->alt_setting); ++ ++ /* Store device info in interface structure */ ++ usb_set_intfdata(intf, ttusbir); ++ ++ /* Register as a LIRC plugin */ ++ if (lirc_buffer_init(&ttusbir->rbuf, sizeof(int), 256) < 0) { ++ err("Could not get memory for LIRC data buffer\n"); ++ usb_set_intfdata(intf, NULL); ++ kfree(ttusbir); ++ return -ENOMEM; ++ } ++ strcpy(ttusbir->plugin.name, "TTUSBIR"); ++ ttusbir->plugin.minor = -1; ++ ttusbir->plugin.code_length = 1; ++ ttusbir->plugin.sample_rate = 0; ++ ttusbir->plugin.data = ttusbir; ++ ttusbir->plugin.add_to_buf = NULL; ++ ttusbir->plugin.get_queue = NULL; ++ ttusbir->plugin.rbuf = &ttusbir->rbuf; ++ ttusbir->plugin.set_use_inc = set_use_inc; ++ ttusbir->plugin.set_use_dec = set_use_dec; ++ ttusbir->plugin.ioctl = NULL; ++ 
ttusbir->plugin.fops = NULL; ++ ttusbir->plugin.owner = THIS_MODULE; ++ ttusbir->plugin.features = LIRC_CAN_REC_MODE2; ++ ttusbir->minor = lirc_register_plugin(&ttusbir->plugin); ++ if (ttusbir->minor < 0) { ++ err("Error registering as LIRC plugin\n"); ++ usb_set_intfdata(intf, NULL); ++ lirc_buffer_free(&ttusbir->rbuf); ++ kfree(ttusbir); ++ return -EIO; ++ } ++ ++ /* Allocate and setup the URB that we will use to talk to the device */ ++ for (i = 0; i < num_urbs; i++) { ++ ttusbir->urb[i] = usb_alloc_urb(8, GFP_KERNEL); ++ if (!ttusbir->urb[i]) { ++ err("Could not allocate memory for the URB\n"); ++ for (j = i - 1; j >= 0; j--) ++ kfree(ttusbir->urb[j]); ++ lirc_buffer_free(&ttusbir->rbuf); ++ lirc_unregister_plugin(ttusbir->minor); ++ kfree(ttusbir); ++ usb_set_intfdata(intf, NULL); ++ return -ENOMEM; ++ } ++ ttusbir->urb[i]->dev = ttusbir->udev; ++ ttusbir->urb[i]->context = ttusbir; ++ ttusbir->urb[i]->pipe = usb_rcvisocpipe(ttusbir->udev, ++ ttusbir->endpoint); ++ ttusbir->urb[i]->interval = 1; ++ ttusbir->urb[i]->transfer_flags = URB_ISO_ASAP; ++ ttusbir->urb[i]->transfer_buffer = &ttusbir->buffer[i][0]; ++ ttusbir->urb[i]->complete = urb_complete; ++ ttusbir->urb[i]->number_of_packets = 8; ++ ttusbir->urb[i]->transfer_buffer_length = 128; ++ for (j = 0; j < 8; j++) { ++ ttusbir->urb[i]->iso_frame_desc[j].offset = j*16; ++ ttusbir->urb[i]->iso_frame_desc[j].length = 16; ++ } ++ } ++ return 0; ++} ++ ++/* Called when the driver is unloaded or the device is unplugged ++ */ ++static void disconnect(struct usb_interface *intf) ++{ ++ int i; ++ struct ttusbir_device *ttusbir; ++ ++ DPRINTK("Module ttusbir disconnect\n"); ++ ++ ttusbir = (struct ttusbir_device *) usb_get_intfdata(intf); ++ usb_set_intfdata(intf, NULL); ++ lirc_unregister_plugin(ttusbir->minor); ++ DPRINTK("unregistered\n"); ++ ++ for (i = 0; i < num_urbs; i++) { ++ usb_kill_urb(ttusbir->urb[i]); ++ usb_free_urb(ttusbir->urb[i]); ++ } ++ DPRINTK("URBs killed\n"); ++ 
lirc_buffer_free(&ttusbir->rbuf); ++ kfree(ttusbir); ++} ++ ++static int ttusbir_init_module(void) ++{ ++ int result; ++ ++ DPRINTK(KERN_DEBUG "Module ttusbir init\n"); ++ ++ /* register this driver with the USB subsystem */ ++ result = usb_register(&driver); ++ if (result) ++ err("usb_register failed. Error number %d", result); ++ return result; ++} ++ ++static void ttusbir_exit_module(void) ++{ ++ printk(KERN_DEBUG "Module ttusbir exit\n"); ++ /* deregister this driver with the USB subsystem */ ++ usb_deregister(&driver); ++} ++ ++#ifdef MODULE ++module_init(ttusbir_init_module); ++module_exit(ttusbir_exit_module); ++ ++#else ++subsys_initcall(ttusbir_init_module); ++ ++#endif /* MODULE */ +diff --git a/drivers/input/lirc/lirc_zilog.c b/drivers/input/lirc/lirc_zilog.c +new file mode 100644 +index 0000000..20e6b27 +--- /dev/null ++++ b/drivers/input/lirc/lirc_zilog.c +@@ -0,0 +1,1395 @@ ++/* ++ * i2c IR lirc plugin for devices with zilog IR processors ++ * ++ * Copyright (c) 2000 Gerd Knorr ++ * modified for PixelView (BT878P+W/FM) by ++ * Michal Kochanowicz ++ * Christoph Bartelmus ++ * modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by ++ * Ulrich Mueller ++ * modified for Asus TV-Box and Creative/VisionTek BreakOut-Box by ++ * Stefan Jahn ++ * modified for inclusion into kernel sources by ++ * Jerome Brock ++ * modified for Leadtek Winfast PVR2000 by ++ * Thomas Reitmayr (treitmayr@yahoo.com) ++ * modified for Hauppauge PVR-150 IR TX device by ++ * Mark Weaver ++ * changed name from lirc_pvr150 to lirc_zilog, works on more than pvr-150 ++ * Jarod Wilson ++ * ++ * parts are cut&pasted from the lirc_i2c.c driver ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "lirc_dev.h" ++#include "lirc.h" ++ ++struct IR { ++ struct lirc_plugin l; ++ ++ /* Device info */ ++ struct mutex lock; ++ int open; ++ ++ /* RX device */ ++ struct i2c_client c_rx; ++ ++ /* RX device buffer & lock */ ++ struct lirc_buffer buf; ++ struct mutex buf_lock; ++ ++ /* RX polling thread data */ ++ struct completion *t_notify; ++ struct completion *t_notify2; ++ int shutdown; ++ struct task_struct *task; ++ ++ /* RX read data */ ++ unsigned char b[3]; ++ ++ /* TX device */ ++ struct i2c_client c_tx; ++ int need_boot; ++ ++ /* # devices, for shutdown */ ++ int devs; ++}; ++ ++/* Minor -> data mapping */ ++static struct IR *ir_devices[MAX_IRCTL_DEVICES]; ++ ++/* Block size for IR transmitter */ ++#define TX_BLOCK_SIZE 99 ++ ++/* Hauppuage IR transmitter data */ ++struct tx_data_struct { ++ /* Boot block */ ++ unsigned char *boot_data; ++ ++ /* Start of binary data block */ ++ unsigned char *datap; ++ ++ /* End of binary data block */ ++ unsigned char *endp; ++ ++ /* Number of installed codesets */ ++ unsigned int num_code_sets; ++ ++ /* Pointers to codesets */ ++ unsigned char **code_sets; ++ ++ /* Global fixed data template */ ++ int fixed[TX_BLOCK_SIZE]; ++}; ++ ++static struct tx_data_struct *tx_data; ++struct mutex tx_data_lock; ++ ++/* 
----------------------------------------------------------------------- */ ++ ++#define DEVICE_NAME "lirc_zilog" ++#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \ ++ ## args) ++#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) ++ ++/* ----------------------------------------------------------------------- */ ++/* insmod parameters */ ++ ++static int debug; /* debug output */ ++static int disable_rx; /* disable RX device */ ++static int disable_tx; /* disable TX device */ ++static int minor = -1; /* minor number */ ++ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG DEVICE_NAME ": " fmt, \ ++ ## args); \ ++ } while (0) ++ ++/* ----------------------------------------------------------------------- */ ++ ++static int add_to_buf(struct IR *ir) ++{ ++ __u16 code; ++ unsigned char codes[2]; ++ unsigned char keybuf[6]; ++ int got_data = 0; ++ int ret; ++ int failures = 0; ++ unsigned char sendbuf[1] = { 0 }; ++ ++ if (lirc_buffer_full(&ir->buf)) { ++ dprintk("buffer overflow\n"); ++ return -EOVERFLOW; ++ } ++ ++ /* service the device as long as it is returning ++ * data and we have space ++ */ ++ do { ++ /* Lock i2c bus for the duration. RX/TX chips interfere so ++ this is worth it ++ */ ++ mutex_lock(&ir->lock); ++ ++ /* Send random "poll command" (?) Windows driver does this ++ and it is a good point to detect chip failure. 
++ */ ++ ret = i2c_master_send(&ir->c_rx, sendbuf, 1); ++ if (ret != 1) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ if (failures >= 3) { ++ mutex_unlock(&ir->lock); ++ zilog_error("unable to read from the IR chip " ++ "after 3 resets, giving up\n"); ++ return ret; ++ } ++ ++ /* Looks like the chip crashed, reset it */ ++ zilog_error("polling the IR receiver chip failed, " ++ "trying reset\n"); ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_timeout((100 * HZ + 999) / 1000); ++ ir->need_boot = 1; ++ ++ ++failures; ++ mutex_unlock(&ir->lock); ++ continue; ++ } ++ ++ ret = i2c_master_recv(&ir->c_rx, keybuf, sizeof(keybuf)); ++ mutex_unlock(&ir->lock); ++ if (ret != sizeof(keybuf)) { ++ zilog_error("i2c_master_recv failed with %d -- " ++ "keeping last read buffer\n", ret); ++ } else { ++ ir->b[0] = keybuf[3]; ++ ir->b[1] = keybuf[4]; ++ ir->b[2] = keybuf[5]; ++ dprintk("key (0x%02x/0x%02x)\n", ir->b[0], ir->b[1]); ++ } ++ ++ /* key pressed ? */ ++ if ((ir->b[0] & 0x80) == 0) ++ return got_data ? 0 : -ENODATA; ++ ++ /* look what we have */ ++ code = (((__u16)ir->b[0]&0x7f)<<6) | (ir->b[1]>>2); ++ ++ codes[0] = (code >> 8) & 0xff; ++ codes[1] = code & 0xff; ++ ++ /* return it */ ++ lirc_buffer_write_1(&ir->buf, codes); ++ ++got_data; ++ } while (!lirc_buffer_full(&ir->buf)); ++ return 0; ++} ++ ++/* Main function of the polling thread -- from lirc_dev. ++ * We don't fit the LIRC model at all anymore. This is horrible, but ++ * basically we have a single RX/TX device with a nasty failure mode ++ * that needs to be accounted for across the pair. lirc lets us provide ++ * fops, but prevents us from using the internal polling, etc. if we do ++ * so. Hence the replication. Might be neater to extend the LIRC model ++ * to account for this but I'd think it's a very special case of seriously ++ * messed up hardware. 
++ */ ++static int lirc_thread(void *arg) ++{ ++ struct IR *ir = arg; ++ ++ if (ir->t_notify != NULL) ++ complete(ir->t_notify); ++ ++ dprintk("poll thread started\n"); ++ ++ do { ++ if (ir->open) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ /* This is ~113*2 + 24 + jitter (2*repeat gap + ++ code length). We use this interval as the chip ++ resets every time you poll it (bad!). This is ++ therefore just sufficient to catch all of the ++ button presses. It makes the remote much more ++ responsive. You can see the difference by ++ running irw and holding down a button. With ++ 100ms, the old polling interval, you'll notice ++ breaks in the repeat sequence corresponding to ++ lost keypresses. ++ */ ++ schedule_timeout((260 * HZ) / 1000); ++ if (ir->shutdown) ++ break; ++ if (!add_to_buf(ir)) ++ wake_up_interruptible(&ir->buf.wait_poll); ++ } else { ++ /* if device not opened so we can sleep half a second */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(HZ/2); ++ } ++ } while (!ir->shutdown); ++ ++ if (ir->t_notify2 != NULL) ++ wait_for_completion(ir->t_notify2); ++ ++ ir->task = NULL; ++ if (ir->t_notify != NULL) ++ complete(ir->t_notify); ++ ++ dprintk("poll thread ended\n"); ++ return 0; ++} ++ ++static int set_use_inc(void *data) ++{ ++ struct IR *ir = data; ++ ++ if (ir->l.owner == NULL || try_module_get(ir->l.owner) == 0) ++ return -ENODEV; ++ ++ /* lock bttv in memory while /dev/lirc is in use */ ++ /* this is completely broken code. 
lirc_unregister_plugin() ++ must be possible even when the device is open */ ++ if (ir->c_rx.addr) ++ i2c_use_client(&ir->c_rx); ++ if (ir->c_tx.addr) ++ i2c_use_client(&ir->c_tx); ++ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++ struct IR *ir = data; ++ ++ if (ir->c_rx.addr) ++ i2c_release_client(&ir->c_rx); ++ if (ir->c_tx.addr) ++ i2c_release_client(&ir->c_tx); ++ if (ir->l.owner != NULL) ++ module_put(ir->l.owner); ++} ++ ++/* safe read of a uint32 (always network byte order) */ ++static inline int read_uint32(unsigned char **data, ++ unsigned char *endp, unsigned int *val) ++{ ++ if (*data + 4 > endp) ++ return 0; ++ *val = ((*data)[0] << 24) | ((*data)[1] << 16) | ++ ((*data)[2] << 8) | (*data)[3]; ++ *data += 4; ++ return 1; ++} ++ ++/* safe read of a uint8 */ ++static inline int read_uint8(unsigned char **data, ++ unsigned char *endp, unsigned char *val) ++{ ++ if (*data + 1 > endp) ++ return 0; ++ *val = *((*data)++); ++ return 1; ++} ++ ++/* safe skipping of N bytes */ ++static inline int skip(unsigned char **data, ++ unsigned char *endp, unsigned int distance) ++{ ++ if (*data + distance > endp) ++ return 0; ++ *data += distance; ++ return 1; ++} ++ ++/* decompress key data into the given buffer */ ++static int get_key_data(unsigned char *buf, ++ unsigned int codeset, unsigned int key) ++{ ++ unsigned char *data, *endp, *diffs, *key_block; ++ unsigned char keys, ndiffs, id; ++ unsigned int base, lim, pos, i; ++ ++ /* Binary search for the codeset */ ++ for (base = 0, lim = tx_data->num_code_sets; lim; lim >>= 1) { ++ pos = base + (lim >> 1); ++ data = tx_data->code_sets[pos]; ++ ++ if (!read_uint32(&data, tx_data->endp, &i)) ++ goto corrupt; ++ ++ if (i == codeset) ++ break; ++ else if (codeset > i) { ++ base = pos + 1; ++ --lim; ++ } ++ } ++ /* Not found? */ ++ if (!lim) ++ return -EPROTO; ++ ++ /* Set end of data block */ ++ endp = pos < tx_data->num_code_sets - 1 ? 
++ tx_data->code_sets[pos + 1] : tx_data->endp; ++ ++ /* Read the block header */ ++ if (!read_uint8(&data, endp, &keys) || ++ !read_uint8(&data, endp, &ndiffs) || ++ ndiffs > TX_BLOCK_SIZE || keys == 0) ++ goto corrupt; ++ ++ /* Save diffs & skip */ ++ diffs = data; ++ if (!skip(&data, endp, ndiffs)) ++ goto corrupt; ++ ++ /* Read the id of the first key */ ++ if (!read_uint8(&data, endp, &id)) ++ goto corrupt; ++ ++ /* Unpack the first key's data */ ++ for (i = 0; i < TX_BLOCK_SIZE; ++i) { ++ if (tx_data->fixed[i] == -1) { ++ if (!read_uint8(&data, endp, &buf[i])) ++ goto corrupt; ++ } else { ++ buf[i] = (unsigned char)tx_data->fixed[i]; ++ } ++ } ++ ++ /* Early out key found/not found */ ++ if (key == id) ++ return 0; ++ if (keys == 1) ++ return -EPROTO; ++ ++ /* Sanity check */ ++ key_block = data; ++ if (!skip(&data, endp, (keys - 1) * (ndiffs + 1))) ++ goto corrupt; ++ ++ /* Binary search for the key */ ++ for (base = 0, lim = keys - 1; lim; lim >>= 1) { ++ /* Seek to block */ ++ unsigned char *key_data; ++ pos = base + (lim >> 1); ++ key_data = key_block + (ndiffs + 1) * pos; ++ ++ if (*key_data == key) { ++ /* skip key id */ ++ ++key_data; ++ ++ /* found, so unpack the diffs */ ++ for (i = 0; i < ndiffs; ++i) { ++ unsigned char val; ++ if (!read_uint8(&key_data, endp, &val) || ++ diffs[i] >= TX_BLOCK_SIZE) ++ goto corrupt; ++ buf[diffs[i]] = val; ++ } ++ ++ return 0; ++ } else if (key > *key_data) { ++ base = pos + 1; ++ --lim; ++ } ++ } ++ /* Key not found */ ++ return -EPROTO; ++ ++corrupt: ++ zilog_error("firmware is corrupt\n"); ++ return -EFAULT; ++} ++ ++/* send a block of data to the IR TX device */ ++static int send_data_block(struct IR *ir, unsigned char *data_block) ++{ ++ int i, j, ret; ++ unsigned char buf[5]; ++ ++ for (i = 0; i < TX_BLOCK_SIZE;) { ++ int tosend = TX_BLOCK_SIZE - i; ++ if (tosend > 4) ++ tosend = 4; ++ buf[0] = (unsigned char)(i + 1); ++ for (j = 0; j < tosend; ++j) ++ buf[1 + j] = data_block[i + j]; ++ dprintk("%02x %02x %02x 
%02x %02x", ++ buf[0], buf[1], buf[2], buf[3], buf[4]); ++ ret = i2c_master_send(&ir->c_tx, buf, tosend + 1); ++ if (ret != tosend + 1) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ i += tosend; ++ } ++ return 0; ++} ++ ++/* send boot data to the IR TX device */ ++static int send_boot_data(struct IR *ir) ++{ ++ int ret; ++ unsigned char buf[4]; ++ ++ /* send the boot block */ ++ ret = send_data_block(ir, tx_data->boot_data); ++ if (ret != 0) ++ return ret; ++ ++ /* kick it off? */ ++ buf[0] = 0x00; ++ buf[1] = 0x20; ++ ret = i2c_master_send(&ir->c_tx, buf, 2); ++ if (ret != 2) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ret = i2c_master_send(&ir->c_tx, buf, 1); ++ if (ret != 1) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ++ /* Here comes the firmware version... (hopefully) */ ++ ret = i2c_master_recv(&ir->c_tx, buf, 4); ++ if (ret != 4) { ++ zilog_error("i2c_master_recv failed with %d\n", ret); ++ return 0; ++ } ++ if (buf[0] != 0x80) { ++ zilog_error("unexpected IR TX response: %02x\n", buf[0]); ++ return 0; ++ } ++ zilog_notify("Zilog/Hauppauge IR blaster: firmware version " ++ "%d.%d.%d\n", buf[1], buf[2], buf[3]); ++ ++ return 0; ++} ++ ++/* unload "firmware", lock held */ ++static void fw_unload_locked(void) ++{ ++ if (tx_data) { ++ if (tx_data->code_sets) ++ vfree(tx_data->code_sets); ++ ++ if (tx_data->datap) ++ vfree(tx_data->datap); ++ ++ vfree(tx_data); ++ tx_data = NULL; ++ dprintk("successfully unloaded IR blaster firmware\n"); ++ } ++} ++ ++/* unload "firmware" for the IR TX device */ ++static void fw_unload(void) ++{ ++ mutex_lock(&tx_data_lock); ++ fw_unload_locked(); ++ mutex_unlock(&tx_data_lock); ++} ++ ++/* load "firmware" for the IR TX device */ ++static int fw_load(struct IR *ir) ++{ ++ int ret; ++ unsigned int i; ++ unsigned char *data, version, num_global_fixed; ++ const 
struct firmware *fw_entry = NULL; ++ ++ /* Already loaded? */ ++ mutex_lock(&tx_data_lock); ++ if (tx_data) { ++ ret = 0; ++ goto out; ++ } ++ ++ /* Request codeset data file */ ++ ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &ir->c_tx.dev); ++ if (ret != 0) { ++ zilog_error("firmware haup-ir-blaster.bin not available " ++ "(%d)\n", ret); ++ ret = ret < 0 ? ret : -EFAULT; ++ goto out; ++ } ++ zilog_notify("firmware of size %zu loaded\n", fw_entry->size); ++ ++ /* Parse the file */ ++ tx_data = vmalloc(sizeof(*tx_data)); ++ if (tx_data == NULL) { ++ zilog_error("out of memory\n"); ++ release_firmware(fw_entry); ++ ret = -ENOMEM; ++ goto out; ++ } ++ tx_data->code_sets = NULL; ++ ++ /* Copy the data so hotplug doesn't get confused and timeout */ ++ tx_data->datap = vmalloc(fw_entry->size); ++ if (tx_data->datap == NULL) { ++ zilog_error("out of memory\n"); ++ release_firmware(fw_entry); ++ vfree(tx_data); ++ ret = -ENOMEM; ++ goto out; ++ } ++ memcpy(tx_data->datap, fw_entry->data, fw_entry->size); ++ tx_data->endp = tx_data->datap + fw_entry->size; ++ release_firmware(fw_entry); fw_entry = NULL; ++ ++ /* Check version */ ++ data = tx_data->datap; ++ if (!read_uint8(&data, tx_data->endp, &version)) ++ goto corrupt; ++ if (version != 1) { ++ zilog_error("unsupported code set file version (%u, expected" ++ "1) -- please upgrade to a newer driver", ++ version); ++ fw_unload_locked(); ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ /* Save boot block for later */ ++ tx_data->boot_data = data; ++ if (!skip(&data, tx_data->endp, TX_BLOCK_SIZE)) ++ goto corrupt; ++ ++ if (!read_uint32(&data, tx_data->endp, ++ &tx_data->num_code_sets)) ++ goto corrupt; ++ ++ zilog_notify("%u codesets loaded\n", tx_data->num_code_sets); ++ ++ tx_data->code_sets = vmalloc( ++ tx_data->num_code_sets * sizeof(char *)); ++ if (tx_data->code_sets == NULL) { ++ fw_unload_locked(); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ for (i = 0; i < TX_BLOCK_SIZE; ++i) ++ tx_data->fixed[i] = -1; ++ ++ /* 
Read global fixed data template */ ++ if (!read_uint8(&data, tx_data->endp, &num_global_fixed) || ++ num_global_fixed > TX_BLOCK_SIZE) ++ goto corrupt; ++ for (i = 0; i < num_global_fixed; ++i) { ++ unsigned char pos, val; ++ if (!read_uint8(&data, tx_data->endp, &pos) || ++ !read_uint8(&data, tx_data->endp, &val) || ++ pos >= TX_BLOCK_SIZE) ++ goto corrupt; ++ tx_data->fixed[pos] = (int)val; ++ } ++ ++ /* Filch out the position of each code set */ ++ for (i = 0; i < tx_data->num_code_sets; ++i) { ++ unsigned int id; ++ unsigned char keys; ++ unsigned char ndiffs; ++ ++ /* Save the codeset position */ ++ tx_data->code_sets[i] = data; ++ ++ /* Read header */ ++ if (!read_uint32(&data, tx_data->endp, &id) || ++ !read_uint8(&data, tx_data->endp, &keys) || ++ !read_uint8(&data, tx_data->endp, &ndiffs) || ++ ndiffs > TX_BLOCK_SIZE || keys == 0) ++ goto corrupt; ++ ++ /* skip diff positions */ ++ if (!skip(&data, tx_data->endp, ndiffs)) ++ goto corrupt; ++ ++ /* After the diffs we have the first key id + data - ++ global fixed */ ++ if (!skip(&data, tx_data->endp, ++ 1 + TX_BLOCK_SIZE - num_global_fixed)) ++ goto corrupt; ++ ++ /* Then we have keys-1 blocks of key id+diffs */ ++ if (!skip(&data, tx_data->endp, ++ (ndiffs + 1) * (keys - 1))) ++ goto corrupt; ++ } ++ ret = 0; ++ goto out; ++ ++corrupt: ++ zilog_error("firmware is corrupt\n"); ++ fw_unload_locked(); ++ ret = -EFAULT; ++ ++out: ++ mutex_unlock(&tx_data_lock); ++ return ret; ++} ++ ++/* initialise the IR TX device */ ++static int tx_init(struct IR *ir) ++{ ++ int ret; ++ ++ /* Load 'firmware' */ ++ ret = fw_load(ir); ++ if (ret != 0) ++ return ret; ++ ++ /* Send boot block */ ++ ret = send_boot_data(ir); ++ if (ret != 0) ++ return ret; ++ ir->need_boot = 0; ++ ++ /* Looks good */ ++ return 0; ++} ++ ++/* do nothing stub to make LIRC happy */ ++static loff_t lseek(struct file *filep, loff_t offset, int orig) ++{ ++ return -ESPIPE; ++} ++ ++/* copied from lirc_dev */ ++static ssize_t read(struct file *filep, 
char *outbuf, size_t n, loff_t *ppos) ++{ ++ struct IR *ir = (struct IR *)filep->private_data; ++ unsigned char buf[ir->buf.chunk_size]; ++ int ret = 0, written = 0; ++ DECLARE_WAITQUEUE(wait, current); ++ ++ dprintk("read called\n"); ++ if (ir->c_rx.addr == 0) ++ return -ENODEV; ++ ++ if (mutex_lock_interruptible(&ir->buf_lock)) ++ return -ERESTARTSYS; ++ ++ if (n % ir->buf.chunk_size) { ++ dprintk("read result = -EINVAL\n"); ++ mutex_unlock(&ir->buf_lock); ++ return -EINVAL; ++ } ++ ++ /* we add ourselves to the task queue before buffer check ++ * to avoid losing scan code (in case when queue is awaken somewhere ++ * beetwen while condition checking and scheduling) ++ */ ++ add_wait_queue(&ir->buf.wait_poll, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ /* while we did't provide 'length' bytes, device is opened in blocking ++ * mode and 'copy_to_user' is happy, wait for data. ++ */ ++ while (written < n && ret == 0) { ++ if (lirc_buffer_empty(&ir->buf)) { ++ /* According to the read(2) man page, 'written' can be ++ * returned as less than 'n', instead of blocking ++ * again, returning -EWOULDBLOCK, or returning ++ * -ERESTARTSYS */ ++ if (written) ++ break; ++ if (filep->f_flags & O_NONBLOCK) { ++ ret = -EWOULDBLOCK; ++ break; ++ } ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } else { ++ lirc_buffer_read_1(&ir->buf, buf); ++ ret = copy_to_user((void *)outbuf+written, buf, ++ ir->buf.chunk_size); ++ written += ir->buf.chunk_size; ++ } ++ } ++ ++ remove_wait_queue(&ir->buf.wait_poll, &wait); ++ set_current_state(TASK_RUNNING); ++ mutex_unlock(&ir->buf_lock); ++ ++ dprintk("read result = %s (%d)\n", ++ ret ? "-EFAULT" : "OK", ret); ++ ++ return ret ? 
ret : written; ++} ++ ++/* send a keypress to the IR TX device */ ++static int send_code(struct IR *ir, unsigned int code, unsigned int key) ++{ ++ unsigned char data_block[TX_BLOCK_SIZE]; ++ unsigned char buf[2]; ++ int i, ret; ++ ++ /* Get data for the codeset/key */ ++ ret = get_key_data(data_block, code, key); ++ ++ if (ret == -EPROTO) { ++ zilog_error("failed to get data for code %u, key %u -- check " ++ "lircd.conf entries\n", code, key); ++ return ret; ++ } else if (ret != 0) ++ return ret; ++ ++ /* Send the data block */ ++ ret = send_data_block(ir, data_block); ++ if (ret != 0) ++ return ret; ++ ++ /* Send data block length? */ ++ buf[0] = 0x00; ++ buf[1] = 0x40; ++ ret = i2c_master_send(&ir->c_tx, buf, 2); ++ if (ret != 2) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ret = i2c_master_send(&ir->c_tx, buf, 1); ++ if (ret != 1) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ++ /* Send finished download? */ ++ ret = i2c_master_recv(&ir->c_tx, buf, 1); ++ if (ret != 1) { ++ zilog_error("i2c_master_recv failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ if (buf[0] != 0xA0) { ++ zilog_error("unexpected IR TX response #1: %02x\n", ++ buf[0]); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ++ /* Send prepare command? */ ++ buf[0] = 0x00; ++ buf[1] = 0x80; ++ ret = i2c_master_send(&ir->c_tx, buf, 2); ++ if (ret != 2) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ++ /* This bit NAKs until the device is ready, so we retry it ++ sleeping a bit each time. This seems to be what the windows ++ driver does, approximately. ++ Try for up to 1s. 
++ */ ++ for (i = 0; i < 20; ++i) { ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_timeout((50 * HZ + 999) / 1000); ++ ret = i2c_master_send(&ir->c_tx, buf, 1); ++ if (ret == 1) ++ break; ++ dprintk("NAK expected: i2c_master_send " ++ "failed with %d (try %d)\n", ret, i+1); ++ } ++ if (ret != 1) { ++ zilog_error("IR TX chip never got ready: last i2c_master_send " ++ "failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ++ /* Seems to be an 'ok' response */ ++ i = i2c_master_recv(&ir->c_tx, buf, 1); ++ if (i != 1) { ++ zilog_error("i2c_master_recv failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ if (buf[0] != 0x80) { ++ zilog_error("unexpected IR TX response #2: %02x\n", buf[0]); ++ return -EFAULT; ++ } ++ ++ /* Oh good, it worked */ ++ dprintk("sent code %u, key %u\n", code, key); ++ return 0; ++} ++ ++/* ++ * Write a code to the device. We take in a 32-bit number (an int) and then ++ * decode this to a codeset/key index. The key data is then decompressed and ++ * sent to the device. We have a spin lock as per i2c documentation to prevent ++ * multiple concurrent sends which would probably cause the device to explode. 
++ */ ++static ssize_t write(struct file *filep, const char *buf, size_t n, ++ loff_t *ppos) ++{ ++ struct IR *ir = (struct IR *)filep->private_data; ++ size_t i; ++ int failures = 0; ++ ++ if (ir->c_tx.addr == 0) ++ return -ENODEV; ++ ++ /* Validate user parameters */ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ ++ /* Lock i2c bus for the duration */ ++ mutex_lock(&ir->lock); ++ ++ /* Send each keypress */ ++ for (i = 0; i < n;) { ++ int ret = 0; ++ int command; ++ ++ if (copy_from_user(&command, buf + i, sizeof(command))) { ++ mutex_unlock(&ir->lock); ++ return -EFAULT; ++ } ++ ++ /* Send boot data first if required */ ++ if (ir->need_boot == 1) { ++ ret = send_boot_data(ir); ++ if (ret == 0) ++ ir->need_boot = 0; ++ } ++ ++ /* Send the code */ ++ if (ret == 0) { ++ ret = send_code(ir, (unsigned)command >> 16, ++ (unsigned)command & 0xFFFF); ++ if (ret == -EPROTO) { ++ mutex_unlock(&ir->lock); ++ return ret; ++ } ++ } ++ ++ /* Hmm, a failure. If we've had a few then give up, otherwise ++ try a reset ++ */ ++ if (ret != 0) { ++ /* Looks like the chip crashed, reset it */ ++ zilog_error("sending to the IR transmitter chip " ++ "failed, trying reset\n"); ++ ++ if (failures >= 3) { ++ zilog_error("unable to send to the IR chip " ++ "after 3 resets, giving up\n"); ++ mutex_unlock(&ir->lock); ++ return ret; ++ } ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_timeout((100 * HZ + 999) / 1000); ++ ir->need_boot = 1; ++ ++failures; ++ } else ++ i += sizeof(int); ++ } ++ ++ /* Release i2c bus */ ++ mutex_unlock(&ir->lock); ++ ++ /* All looks good */ ++ return n; ++} ++ ++/* copied from lirc_dev */ ++static unsigned int poll(struct file *filep, poll_table *wait) ++{ ++ struct IR *ir = (struct IR *)filep->private_data; ++ unsigned int ret; ++ ++ dprintk("poll called\n"); ++ if (ir->c_rx.addr == 0) ++ return -ENODEV; ++ ++ mutex_lock(&ir->buf_lock); ++ ++ poll_wait(filep, &ir->buf.wait_poll, wait); ++ ++ dprintk("poll result = %s\n", ++ lirc_buffer_empty(&ir->buf) ? 
"0" : "POLLIN|POLLRDNORM"); ++ ++ ret = lirc_buffer_empty(&ir->buf) ? 0 : (POLLIN|POLLRDNORM); ++ ++ mutex_unlock(&ir->buf_lock); ++ return ret; ++} ++ ++static int ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct IR *ir = (struct IR *)filep->private_data; ++ int result; ++ unsigned long mode, features = 0; ++ ++ if (ir->c_rx.addr != 0) ++ features |= LIRC_CAN_REC_LIRCCODE; ++ if (ir->c_tx.addr != 0) ++ features |= LIRC_CAN_SEND_PULSE; ++ ++ switch (cmd) { ++ case LIRC_GET_LENGTH: ++ result = put_user((unsigned long)13, ++ (unsigned long *)arg); ++ break; ++ case LIRC_GET_FEATURES: ++ result = put_user(features, (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ case LIRC_GET_REC_MODE: ++ if (!(features&LIRC_CAN_REC_MASK)) ++ return -ENOSYS; ++ ++ result = put_user(LIRC_REC2MODE ++ (features&LIRC_CAN_REC_MASK), ++ (unsigned long *)arg); ++ break; ++ case LIRC_SET_REC_MODE: ++ if (!(features&LIRC_CAN_REC_MASK)) ++ return -ENOSYS; ++ ++ result = get_user(mode, (unsigned long *)arg); ++ if (!result && !(LIRC_MODE2REC(mode) & features)) ++ result = -EINVAL; ++ break; ++ case LIRC_GET_SEND_MODE: ++ if (!(features&LIRC_CAN_SEND_MASK)) ++ return -ENOSYS; ++ ++ result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ case LIRC_SET_SEND_MODE: ++ if (!(features&LIRC_CAN_SEND_MASK)) ++ return -ENOSYS; ++ ++ result = get_user(mode, (unsigned long *) arg); ++ if (result) ++ return result; ++ if (mode != LIRC_MODE_PULSE) ++ return -EINVAL; ++ break; ++ default: ++ return -ENOIOCTLCMD; ++ } ++ return 0; ++} ++ ++/* Open the IR device. 
Get hold of our IR structure and ++ stash it in private_data for the file */ ++static int open(struct inode *node, struct file *filep) ++{ ++ struct IR *ir; ++ int ret; ++ ++ /* find our IR struct */ ++ unsigned minor = MINOR(node->i_rdev); ++ if (minor >= MAX_IRCTL_DEVICES) { ++ dprintk("minor %d: open result = -ENODEV\n", ++ minor); ++ return -ENODEV; ++ } ++ ir = ir_devices[minor]; ++ ++ /* increment in use count */ ++ mutex_lock(&ir->lock); ++ ++ir->open; ++ ret = set_use_inc(ir); ++ if (ret != 0) { ++ --ir->open; ++ mutex_unlock(&ir->lock); ++ return ret; ++ } ++ mutex_unlock(&ir->lock); ++ ++ /* stash our IR struct */ ++ filep->private_data = ir; ++ ++ return 0; ++} ++ ++/* Close the IR device */ ++static int close(struct inode *node, struct file *filep) ++{ ++ /* find our IR struct */ ++ struct IR *ir = (struct IR *)filep->private_data; ++ if (ir == NULL) { ++ zilog_error("close: no private_data attached to the file!\n"); ++ return -ENODEV; ++ } ++ ++ /* decrement in use count */ ++ mutex_lock(&ir->lock); ++ --ir->open; ++ set_use_dec(ir); ++ mutex_unlock(&ir->lock); ++ ++ return 0; ++} ++ ++static struct lirc_plugin lirc_template = { ++ .name = "lirc_zilog", ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .owner = THIS_MODULE ++}; ++ ++/* ----------------------------------------------------------------------- */ ++ ++static int ir_attach(struct i2c_adapter *adap, int have_rx, int have_tx); ++static int ir_detach(struct i2c_client *client); ++static int ir_probe(struct i2c_adapter *adap); ++static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg); ++ ++static struct i2c_driver driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "i2c ir driver", ++ }, ++ .attach_adapter = ir_probe, ++ .detach_client = ir_detach, ++ .command = ir_command, ++}; ++ ++static struct i2c_client client_template = { ++ .name = "unset", ++ .driver = &driver ++}; ++ ++static struct file_operations lirc_fops = { ++ .llseek = lseek, ++ 
.read = read, ++ .write = write, ++ .poll = poll, ++ .ioctl = ioctl, ++ .open = open, ++ .release = close ++}; ++ ++static int i2c_attach(struct i2c_client *client, struct IR *ir) ++{ ++ int ret; ++ ++ i2c_set_clientdata(client, ir); ++ ++ ret = i2c_attach_client(client); ++ if (ret != 0) { ++ client->addr = 0; ++ return ret; ++ } ++ if (!i2c_use_client(client)) { ++ i2c_detach_client(client); ++ client->addr = 0; ++ return -EFAULT; ++ } ++ ++ir->devs; ++ return 0; ++} ++ ++static int ir_attach(struct i2c_adapter *adap, int have_rx, int have_tx) ++{ ++ struct IR *ir; ++ int ret, i; ++ ++ printk("lirc_zilog: chip found with %s\n", ++ have_rx && have_tx ? "RX and TX" : ++ have_rx ? "RX only" : "TX only"); ++ ++ ir = kmalloc(sizeof(struct IR), GFP_KERNEL); ++ if (ir == NULL) ++ return -ENOMEM; ++ if (lirc_buffer_init(&ir->buf, 2, BUFLEN/2) != 0) { ++ kfree(ir); ++ return -ENOMEM; ++ } ++ mutex_init(&ir->lock); ++ mutex_init(&ir->buf_lock); ++ ir->open = 0; ++ ir->devs = 0; ++ ir->task = NULL; ++ ir->need_boot = 1; ++ ir->shutdown = 0; ++ ir->t_notify = ir->t_notify2 = NULL; ++ for (i = 0; i < sizeof(ir->b); ++i) ++ ir->b[0] = 0; ++ ++ memcpy(&ir->l, &lirc_template, sizeof(struct lirc_plugin)); ++ ir->l.minor = -1; ++ ++ /* initialise RX device */ ++ client_template.adapter = adap; ++ memcpy(&ir->c_rx, &client_template, sizeof(struct i2c_client)); ++ if (have_rx) { ++ DECLARE_COMPLETION(tn); ++ ++ /* I2C attach to device */ ++ ir->c_rx.addr = 0x71; ++ strncpy(ir->c_rx.name, "Zilog/Hauppauge RX", I2C_NAME_SIZE); ++ ret = i2c_attach(&ir->c_rx, ir); ++ if (ret != 0) ++ goto err; ++ ++ /* try to fire up polling thread */ ++ ir->t_notify = &tn; ++ ir->task = kthread_run(lirc_thread, ir, "lirc_zilog"); ++ ret = PTR_ERR(ir->task); ++ if (ret <= 0) { ++ zilog_error("lirc_register_plugin: cannot run " ++ "poll thread\n"); ++ goto err; ++ } ++ wait_for_completion(&tn); ++ ir->t_notify = NULL; ++ } ++ ++ /* initialise TX device */ ++ memcpy(&ir->c_tx, &client_template, 
sizeof(struct i2c_client)); ++ if (have_tx) { ++ /* I2C attach to device */ ++ ir->c_tx.addr = 0x70; ++ strncpy(ir->c_tx.name, "Zilog/Hauppauge TX", I2C_NAME_SIZE); ++ ret = i2c_attach(&ir->c_tx, ir); ++ if (ret != 0) ++ goto err; ++ } ++ ++ /* set lirc_dev stuff */ ++ ir->l.code_length = 13; ++ ir->l.rbuf = &ir->buf; ++ ir->l.fops = &lirc_fops; ++ ir->l.data = ir; ++ ir->l.minor = minor; ++ ir->l.sample_rate = 0; ++ ++ /* register with lirc */ ++ ir->l.minor = lirc_register_plugin(&ir->l); ++ if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) { ++ zilog_error("ir_attach: \"minor\" must be between 0 and %d " ++ "(%d)!\n", MAX_IRCTL_DEVICES-1, ir->l.minor); ++ ret = -EBADRQC; ++ goto err; ++ } ++ ++ /* store this for getting back in open() later on */ ++ ir_devices[ir->l.minor] = ir; ++ ++ /* if we have the tx device, load the 'firmware'. We do this ++ after registering with lirc as otherwise hotplug seems to take ++ 10s to create the lirc device. ++ */ ++ if (have_tx) { ++ /* Special TX init */ ++ ret = tx_init(ir); ++ if (ret != 0) ++ goto err; ++ } ++ return 0; ++ ++err: ++ /* undo everything, hopefully... 
*/ ++ if (ir->c_rx.addr) ++ ir_detach(&ir->c_rx); ++ if (ir->c_tx.addr) ++ ir_detach(&ir->c_tx); ++ return ret; ++} ++ ++static int ir_detach(struct i2c_client *client) ++{ ++ struct IR *ir = i2c_get_clientdata(client); ++ mutex_lock(&ir->lock); ++ ++ if (client == &ir->c_rx) { ++ DECLARE_COMPLETION(tn); ++ DECLARE_COMPLETION(tn2); ++ ++ /* end up polling thread */ ++ if (ir->task && !IS_ERR(ir->task)) { ++ ir->t_notify = &tn; ++ ir->t_notify2 = &tn2; ++ ir->shutdown = 1; ++ wake_up_process(ir->task); ++ complete(&tn2); ++ wait_for_completion(&tn); ++ ir->t_notify = NULL; ++ ir->t_notify2 = NULL; ++ } ++ ++ /* unregister device */ ++ i2c_detach_client(&ir->c_rx); ++ } else if (client == &ir->c_tx) { ++ i2c_detach_client(&ir->c_tx); ++ } else { ++ mutex_unlock(&ir->lock); ++ zilog_error("ir_detach: detached from something we didn't " ++ "attach to\n"); ++ return -ENODEV; ++ } ++ ++ --ir->devs; ++ if (ir->devs < 0) { ++ mutex_unlock(&ir->lock); ++ zilog_error("ir_detach: invalid device count\n"); ++ return -ENODEV; ++ } else if (ir->devs == 0) { ++ /* unregister lirc plugin */ ++ if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) { ++ lirc_unregister_plugin(ir->l.minor); ++ ir_devices[ir->l.minor] = NULL; ++ } ++ ++ /* free memory */ ++ lirc_buffer_free(&ir->buf); ++ mutex_unlock(&ir->lock); ++ kfree(ir); ++ return 0; ++ } ++ mutex_unlock(&ir->lock); ++ return 0; ++} ++ ++static int ir_probe(struct i2c_adapter *adap) ++{ ++ struct i2c_client c; ++ char buf; ++ ++ if (adap->id == I2C_HW_B_BT848 || ++ adap->id == I2C_HW_B_CX2341X) { ++ int have_rx = 0, have_tx = 0; ++ ++ /* ++ * The external IR receiver is at i2c address 0x71. ++ * The IR transmitter is at 0x70. ++ */ ++ memset(&c, 0, sizeof(c)); ++ c.adapter = adap; ++ c.addr = 0x70; ++ ++ if (!disable_rx) { ++ if (i2c_master_recv(&c, &buf, 1) == 1) ++ have_rx = 1; ++ dprintk("probe 0x70 @ %s: %s\n", ++ adap->name, ++ have_rx ? 
"yes" : "no"); ++ } ++ ++ if (!disable_tx) { ++ c.addr = 0x71; ++ if (i2c_master_recv(&c, &buf, 1) == 1) ++ have_tx = 1; ++ dprintk("probe 0x71 @ %s: %s\n", ++ adap->name, ++ have_tx ? "yes" : "no"); ++ } ++ ++ if (have_rx || have_tx) ++ return ir_attach(adap, have_rx, have_tx); ++ else ++ zilog_error("%s: no devices found\n", adap->name); ++ } ++ ++ return 0; ++} ++ ++static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg) ++{ ++ /* nothing */ ++ return 0; ++} ++ ++/* ----------------------------------------------------------------------- */ ++#ifdef MODULE ++ ++int init_module(void) ++{ ++ mutex_init(&tx_data_lock); ++ request_module("ivtv"); ++ request_module("firmware_class"); ++ i2c_add_driver(&driver); ++ return 0; ++} ++ ++void cleanup_module(void) ++{ ++ i2c_del_driver(&driver); ++ /* if loaded */ ++ fw_unload(); ++} ++ ++MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)"); ++MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, " ++ "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver"); ++MODULE_LICENSE("GPL"); ++/* for compat with old name, which isn't all that accurate anymore */ ++MODULE_ALIAS("lirc_pvr150"); ++ ++module_param(minor, int, 0444); ++MODULE_PARM_DESC(minor, "Preferred minor device number"); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++module_param(disable_rx, bool, 0644); ++MODULE_PARM_DESC(disable_rx, "Disable the IR receiver device"); ++ ++module_param(disable_tx, bool, 0644); ++MODULE_PARM_DESC(disable_tx, "Disable the IR transmitter device"); ++ ++#endif /* MODULE */ ++ ++/* ++ * Overrides for Emacs so that we follow Linus's tabbing style. 
++ * --------------------------------------------------------------------------- ++ * Local variables: ++ * c-basic-offset: 8 ++ * End: ++ */ diff --git a/sys-kernel/geos_one-sources/files/linux-sabayon-2.6.28-ignore-mouse-interface-on-macbooks.patch b/sys-kernel/geos_one-sources/files/linux-sabayon-2.6.28-ignore-mouse-interface-on-macbooks.patch new file mode 100644 index 00000000..99dbbad2 --- /dev/null +++ b/sys-kernel/geos_one-sources/files/linux-sabayon-2.6.28-ignore-mouse-interface-on-macbooks.patch @@ -0,0 +1,30 @@ +From: Jiri Kosina +Date: Thu, 20 Nov 2008 10:27:02 +0000 (+0100) +Subject: HID: ignore mouse interface for unibody macbooks +X-Git-Tag: v2.6.29-rc1~498^2~18 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=ac26fca3e14c8882e382daa7e96ab73e0186cf03 + +HID: ignore mouse interface for unibody macbooks + +The mouse interface on unibody macbooks is going to be handled by +bcm59743 driver in 2.6.29. + +Reported-by: Henrik Rydberg +Signed-off-by: Jiri Kosina +--- + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 40df3e1..839de38 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1577,6 +1577,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, + { } + diff --git 
a/sys-kernel/geos_one-sources/geos_one-sources-2.6.28-r1.ebuild b/sys-kernel/geos_one-sources/geos_one-sources-2.6.28-r1.ebuild index 3d8fb5ef..8bc9770d 100644 --- a/sys-kernel/geos_one-sources/geos_one-sources-2.6.28-r1.ebuild +++ b/sys-kernel/geos_one-sources/geos_one-sources-2.6.28-r1.ebuild @@ -80,7 +80,16 @@ src_unpack() { epatch ${FILESDIR}/linux-2.6-x86-tune-generic.patch epatch ${FILESDIR}/linux-2.6-defaults-fat-utf8.patch epatch ${FILESDIR}/linux-2.6.27-lirc.patch - + epatch ${FILESDIR}/hz-432-kconfig-option.patch + epatch ${FILESDIR}/hz-864-kconfig-option.patch + epatch ${FILESDIR}/enable-4k-stacks-default-2.6.24.patch + epatch ${FILESDIR}/drm-next.patch + epatch ${FILESDIR}/drm-modesetting-radeon.patch + epatch ${FILESDIR}/drm-nouveau.patch + epatch ${FILESDIR}/1-bcm5974-headers.patch + epatch ${FILESDIR}/2-bcm5974-quad-finger-tapping.patch + epatch ${FILESDIR}/3-bcm5974-macbook5-support.patch + epatch ${FILESDIR}/linux-sabayon-2.6.28-ignore-mouse-interface-on-macbooks.patch find ${WORKDIR} -iname "*.orig" -exec rm {} \; 2> /dev/null