# SSDTTime.py
from Scripts import dsdt, plist, reveal, run, utils
import getpass, os, tempfile, shutil, plistlib, sys, binascii, zipfile, re, string, json, textwrap
class SSDT:
    def __init__(self, **kwargs):
        self.u = utils.Utils("SSDT Time")
        self.r = run.Run()
        self.re = reveal.Reveal()
        try:
            self.d = dsdt.DSDT()
        except Exception as e:
            print("Something went wrong :( - Aborting!\n - {}".format(e))
            exit(1)
        self.w = 80
        self.h = 24
        self.red = "\u001b[41;1m"
        self.yel = "\u001b[43;1m"
        self.grn = "\u001b[42;1m"
        self.blu = "\u001b[46;1m"
        self.rst = "\u001b[0m"
        self.copy_as_path = self.u.check_admin() if os.name=="nt" else False
        if 2/3==0:
            # ANSI escapes don't seem to work properly with python 2.x
            self.red = self.yel = self.grn = self.blu = self.rst = ""
        if os.name == "nt":
            if 2/3!=0:
os.system("color") # Allow ASNI color escapes.
            self.w = 120
            self.h = 30
        self.iasl_legacy = False
        self.resize_window = True
        # Set up match mode approach:
        # 0 = Any table id, any length
        # 1 = Any table id, match length
        # 2 = Match table id, match length
        # 3 = Match NORMALIZED table id, match length
        self.match_mode = 0
        self.match_dict = {
            0:"{}Least Strict{}".format(self.red,self.rst),
            1:"{}Length Only{}".format(self.yel,self.rst),
            2:"{}Table Ids and Length{}".format(self.grn,self.rst),
            3:"{}Table Ids and Length (NormalizeHeaders){}".format(self.blu,self.rst)
        }
        self.dsdt = None
        self.settings = os.path.join(os.path.dirname(os.path.realpath(__file__)),"Scripts","settings.json")
        if os.path.exists(self.settings):
            self.load_settings()
        self.output = "Results"
        self.target_irqs = [0,2,8,11]
        self.illegal_names = ("XHC1","EHC1","EHC2","PXSX")
        # _OSI Strings found here: https://learn.microsoft.com/en-us/windows-hardware/drivers/acpi/winacpi-osi
        self.osi_strings = {
            "Windows 2000": "Windows 2000",
            "Windows XP": "Windows 2001",
            "Windows XP SP1": "Windows 2001 SP1",
            "Windows Server 2003": "Windows 2001.1",
            "Windows XP SP2": "Windows 2001 SP2",
            "Windows Server 2003 SP1": "Windows 2001.1 SP1",
            "Windows Vista": "Windows 2006",
            "Windows Vista SP1": "Windows 2006 SP1",
            "Windows Server 2008": "Windows 2006.1",
            "Windows 7, Win Server 2008 R2": "Windows 2009",
            "Windows 8, Win Server 2012": "Windows 2012",
            "Windows 8.1": "Windows 2013",
            "Windows 10": "Windows 2015",
            "Windows 10, version 1607": "Windows 2016",
            "Windows 10, version 1703": "Windows 2017",
            "Windows 10, version 1709": "Windows 2017.2",
            "Windows 10, version 1803": "Windows 2018",
            "Windows 10, version 1809": "Windows 2018.2",
            "Windows 10, version 1903": "Windows 2019",
            "Windows 10, version 2004": "Windows 2020",
            "Windows 11": "Windows 2021",
            "Windows 11, version 22H2": "Windows 2022"
        }
        self.pre_patches = (
            {
                "PrePatch":"GPP7 duplicate _PRW methods",
                "Comment" :"GPP7._PRW to XPRW to fix Gigabyte's Mistake",
                "Find" :"3708584847500A021406535245470214065350525701085F505257",
                "Replace" :"3708584847500A0214065352454702140653505257010858505257"
            },
            {
                "PrePatch":"GPP7 duplicate UP00 devices",
                "Comment" :"GPP7.UP00 to UPXX to fix Gigabyte's Mistake",
                "Find" :"1047052F035F53425F50434930475050375B82450455503030",
                "Replace" :"1047052F035F53425F50434930475050375B82450455505858"
            },
            {
                "PrePatch":"GPP6 duplicate _PRW methods",
                "Comment" :"GPP6._PRW to XPRW to fix ASRock's Mistake",
                "Find" :"47505036085F4144520C04000200140F5F505257",
                "Replace" :"47505036085F4144520C04000200140F58505257"
            },
            {
                "PrePatch":"GPP1 duplicate PTXH devices",
                "Comment" :"GPP1.PTXH to XTXH to fix MSI's Mistake",
                "Find" :"50545848085F41445200140F",
                "Replace" :"58545848085F41445200140F"
            }
        )
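        # Note: the Find/Replace values above are raw AML byte sequences - e.g. 5F505257 is
        # ASCII "_PRW" and 58505257 is "XPRW" - swapped directly in the table binary before
        # disassembly is retried.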
    def save_settings(self):
        settings = {
            "legacy_compiler": self.iasl_legacy,
            "resize_window": self.resize_window,
            "match_mode": self.match_mode
        }
        try: json.dump(settings,open(self.settings,"w"),indent=2)
        except: return
    def load_settings(self):
        try:
            settings = json.load(open(self.settings))
            if self.d.iasl_legacy: # Only load the legacy compiler setting if we can
                self.iasl_legacy = settings.get("legacy_compiler",False)
            self.resize_window = settings.get("resize_window",True)
            self.match_mode = settings.get("match_mode",0)
        except: return
    def get_unique_name(self,name,target_folder,name_append="-Patched"):
        # Get a new file name in the Results folder so we don't override the original
        name = os.path.basename(name)
        ext = "" if not "." in name else name.split(".")[-1]
        if ext: name = name[:-len(ext)-1]
        if name_append: name = name+str(name_append)
        check_name = ".".join((name,ext)) if ext else name
        if not os.path.exists(os.path.join(target_folder,check_name)):
            return check_name
        # We need a unique name
        num = 1
        while True:
            check_name = "{}-{}".format(name,num)
            if ext: check_name += "."+ext
            if not os.path.exists(os.path.join(target_folder,check_name)):
                return check_name
            num += 1 # Increment our counter
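    # Example (hypothetical names): get_unique_name("DSDT.aml","Results") yields "DSDT-Patched.aml",
    # falling back to "DSDT-Patched-1.aml", "DSDT-Patched-2.aml", etc. if that name already exists.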
    def sorted_nicely(self, l):
        convert = lambda text: int(text) if text.isdigit() else text
        alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key.lower()) ]
        return sorted(l, key = alphanum_key)
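    # Natural sort - e.g. ["SSDT-10.aml","SSDT-2.aml","DSDT.aml"] sorts to
    # ["DSDT.aml","SSDT-2.aml","SSDT-10.aml"] because digit runs compare as integers.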
    def load_dsdt(self, path):
        if not path:
            return
        self.u.head("Loading ACPI Table(s)")
        print("")
        tables = []
        trouble_dsdt = None
        fixed = False
        temp = None
        prior_tables = self.d.acpi_tables # Retain in case of failure
        # Clear any existing tables so we load anew
        self.d.acpi_tables = {}
        if os.path.isdir(path):
            print("Gathering valid tables from {}...\n".format(os.path.basename(path)))
            for t in self.sorted_nicely(os.listdir(path)):
                if self.d.table_is_valid(path,t):
                    print(" - {}".format(t))
                    tables.append(t)
            if not tables:
                # Check if there's an ACPI directory within the passed
                # directory - this may indicate SysReport was dropped
                if os.path.isdir(os.path.join(path,"ACPI")):
                    # Rerun this function with that updated path
                    return self.load_dsdt(os.path.join(path,"ACPI"))
                print(" - No valid .aml files were found!")
                print("")
                self.u.grab("Press [enter] to return...")
                # Restore any prior tables
                self.d.acpi_tables = prior_tables
                return
            print("")
            # We got at least one file - let's look for the DSDT specifically
            # and try to load that as-is. If it doesn't load, we'll have to
            # manage everything with temp folders
            dsdt_list = [x for x in tables if self.d._table_signature(path,x) == b"DSDT"]
            if len(dsdt_list) > 1:
                print("Multiple files with DSDT signature passed:")
                for d in self.sorted_nicely(dsdt_list):
                    print(" - {}".format(d))
                print("\nOnly one is allowed at a time. Please remove all but one of the above and try")
                print("again.")
                print("")
                self.u.grab("Press [enter] to return...")
                # Restore any prior tables
                self.d.acpi_tables = prior_tables
                return
            # Get the DSDT, if any
            dsdt = dsdt_list[0] if len(dsdt_list) else None
            if dsdt: # Try to load it and see if it causes problems
                print("Disassembling {} to verify if pre-patches are needed...".format(dsdt))
                if not self.d.load(os.path.join(path,dsdt))[0]:
                    trouble_dsdt = dsdt
                else:
                    print("\nDisassembled successfully!\n")
        elif os.path.isfile(path):
            print("Loading {}...".format(os.path.basename(path)))
            if self.d.load(path)[0]:
                print("\nDone.")
                # If it loads fine - just return the path
                # to the parent directory
                return os.path.dirname(path)
            if not self.d._table_signature(path) == b"DSDT":
                # Not a DSDT, we aren't applying pre-patches
                print("\n{} could not be disassembled!".format(os.path.basename(path)))
                print("")
                self.u.grab("Press [enter] to return...")
                # Restore any prior tables
                self.d.acpi_tables = prior_tables
                return
            # It didn't load - set it as the trouble file
            trouble_dsdt = os.path.basename(path)
            # Put the table in the tables list, and adjust
            # the path to represent the parent dir
            tables.append(os.path.basename(path))
            path = os.path.dirname(path)
        else:
            print("Passed file/folder does not exist!")
            print("")
            self.u.grab("Press [enter] to return...")
            # Restore any prior tables
            self.d.acpi_tables = prior_tables
            return
        # If we got here - check if we have a trouble_dsdt.
        if trouble_dsdt:
            # We need to move our ACPI files to a temp folder
            # then try patching the DSDT there
            temp = tempfile.mkdtemp()
            for table in tables:
                shutil.copy(
                    os.path.join(path,table),
                    temp
                )
            # Get a reference to the new trouble file
            trouble_path = os.path.join(temp,trouble_dsdt)
            # Now we try patching it
            print("Checking available pre-patches...")
            print("Loading {} into memory...".format(trouble_dsdt))
            with open(trouble_path,"rb") as f:
                d = f.read()
            res = self.d.check_output(self.output)
            target_name = self.get_unique_name(trouble_dsdt,res,name_append="-Patched")
            patches = []
            print("Iterating patches...\n")
            for p in self.pre_patches:
                if not all(x in p for x in ("PrePatch","Comment","Find","Replace")): continue
                print(" - {}".format(p["PrePatch"]))
                find = binascii.unhexlify(p["Find"])
                if d.count(find) == 1:
                    patches.append(p) # Retain the patch
                    repl = binascii.unhexlify(p["Replace"])
                    print(" --> Located - applying...")
                    d = d.replace(find,repl) # Replace it in memory
                    with open(trouble_path,"wb") as f:
                        f.write(d) # Write the updated file
                    # Attempt to load again
                    loaded_table = self.d.load(trouble_path)[0]
                    if loaded_table:
                        try:
                            table = loaded_table[list(loaded_table)[0]]
                        except:
                            pass
                        fixed = True
                        # We got it to load - let's write the patches
                        print("\nDisassembled successfully!\n")
                        self.make_plist(None, None, patches)
                        # Save to the local file
                        with open(os.path.join(res,target_name),"wb") as f:
                            f.write(d)
                        print("\n!! Patches applied to modified file in Results folder:\n {}".format(target_name))
                        self.patch_warn()
                        break
            if not fixed:
                print("\n{} could not be disassembled!".format(trouble_dsdt))
                print("")
                self.u.grab("Press [enter] to return...")
                if temp:
                    shutil.rmtree(temp,ignore_errors=True)
                # Restore any prior tables
                self.d.acpi_tables = prior_tables
                return
        # Let's load the rest of the tables
        if len(tables) > 1:
            print("Loading valid tables in {}...".format(path))
        loaded_tables,failed = self.d.load(temp or path)
        if not loaded_tables or failed:
            print("\nFailed to load tables in {}{}\n".format(
                os.path.dirname(path) if os.path.isfile(path) else path,
                ":" if failed else ""
            ))
            for t in self.sorted_nicely(failed):
                print(" - {}".format(t))
            # Restore any prior tables
            if not loaded_tables:
                self.d.acpi_tables = prior_tables
        else:
            if len(tables) > 1:
                print("") # Newline for readability
            print("Done.")
        # If we had to patch the DSDT, or if not all tables loaded,
        # make sure we get interaction from the user to continue
        if trouble_dsdt or not loaded_tables or failed:
            print("")
            self.u.grab("Press [enter] to continue...")
        if temp:
            shutil.rmtree(temp,ignore_errors=True)
        return path
    def select_dsdt(self, single_table=False):
        while True:
            self.u.head("Select ACPI Table{}".format("" if single_table else "s"))
            print(" ")
            if self.copy_as_path:
                print("NOTE: Currently running as admin on Windows - drag and drop may not work.")
                print(" Shift + right-click in Explorer and select 'Copy as path' then paste here instead.")
                print("")
            print("M. Main")
            print("Q. Quit")
            print(" ")
            if single_table:
                print("NOTE: The function requesting this table expects either a single table, or one")
                print(" with the DSDT signature. If neither condition is met, you will be")
                print(" returned to the main menu.")
                print("")
            dsdt = self.u.grab("Please drag and drop an ACPI table or folder of tables here: ")
            if dsdt.lower() == "m":
                return self.dsdt
            if dsdt.lower() == "q":
                self.u.custom_quit()
            out = self.u.check_path(dsdt)
            if not out: continue
            # Got a DSDT, try to load it
            return self.load_dsdt(out)
    def _ensure_dsdt(self, allow_any=False):
        # Helper to check conditions for when we have valid tables
        return self.dsdt and ((allow_any and self.d.acpi_tables) or (not allow_any and self.d.get_dsdt_or_only()))
    def ensure_dsdt(self, allow_any=False):
        if self._ensure_dsdt(allow_any=allow_any):
            # Got it already
            return True
        # Need to prompt
        self.dsdt = self.select_dsdt(single_table=not allow_any)
        if self._ensure_dsdt(allow_any=allow_any):
            return True
        return False
    def write_ssdt(self, ssdt_name, ssdt):
        res = self.d.check_output(self.output)
        dsl_path = os.path.join(res,ssdt_name+".dsl")
        aml_path = os.path.join(res,ssdt_name+".aml")
        iasl_path = self.d.iasl_legacy if self.iasl_legacy else self.d.iasl
        with open(dsl_path,"w") as f:
            f.write(ssdt)
        print("Compiling...{}".format(" {}!! Using Legacy Compiler !!{}".format(self.yel,self.rst) if self.iasl_legacy else ""))
        out = self.r.run({"args":[iasl_path, dsl_path]})
        if out[2] != 0:
            print(" - {}".format(out[1]))
            self.re.reveal(dsl_path,True)
            return False
        else:
            self.re.reveal(aml_path,True)
            return True
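    # write_ssdt() saves <name>.dsl to the Results folder, compiles it with iasl, and returns
    # True on success (revealing the .aml) or False on a failed compile (revealing the .dsl).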
    def ensure_path(self, plist_data, path_list, final_type = list):
        if not path_list: return plist_data
        last = plist_data
        for index,path in enumerate(path_list):
            if not path in last:
                if index >= len(path_list)-1:
                    last[path] = final_type()
                else:
                    last[path] = {}
            last = last[path]
        return plist_data
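    # e.g. ensure_path({},("ACPI","Patch")) returns {"ACPI":{"Patch":[]}} - intermediate keys
    # become dicts, the final key gets final_type(), and existing entries are left untouched.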
    def make_plist(self, oc_acpi, cl_acpi, patches, drops=[], replace=False):
        # if not len(patches): return # No patches to add - bail
        repeat = False
        print("Building patches_OC and patches_Clover plists...")
        output = self.d.check_output(self.output)
        oc_plist = {}
        cl_plist = {}
        # Check for the plists
        if os.path.isfile(os.path.join(output,"patches_OC.plist")):
            e = os.path.join(output,"patches_OC.plist")
            with open(e,"rb") as f:
                oc_plist = plist.load(f)
        if os.path.isfile(os.path.join(output,"patches_Clover.plist")):
            e = os.path.join(output,"patches_Clover.plist")
            with open(e,"rb") as f:
                cl_plist = plist.load(f)
        # Ensure all the pathing is where it needs to be
        if oc_acpi: oc_plist = self.ensure_path(oc_plist,("ACPI","Add"))
        if cl_acpi: cl_plist = self.ensure_path(cl_plist,("ACPI","SortedOrder"))
        if patches:
            oc_plist = self.ensure_path(oc_plist,("ACPI","Patch"))
            cl_plist = self.ensure_path(cl_plist,("ACPI","DSDT","Patches"))
        if drops:
            oc_plist = self.ensure_path(oc_plist,("ACPI","Delete"))
            cl_plist = self.ensure_path(cl_plist,("ACPI","DropTables"))
        # Add the .aml references
        if replace: # Remove any conflicting entries
            if oc_acpi:
                oc_plist["ACPI"]["Add"] = [x for x in oc_plist["ACPI"]["Add"] if oc_acpi["Path"] != x["Path"]]
            if cl_acpi:
                cl_plist["ACPI"]["SortedOrder"] = [x for x in cl_plist["ACPI"]["SortedOrder"] if cl_acpi != x]
        if oc_acpi: # Make sure we have something
            if any(oc_acpi["Path"] == x["Path"] for x in oc_plist["ACPI"]["Add"]):
                print(" -> Add \"{}\" already in OC plist!".format(oc_acpi["Path"]))
            else:
                oc_plist["ACPI"]["Add"].append(oc_acpi)
        if cl_acpi: # Make sure we have something
            if cl_acpi in cl_plist["ACPI"]["SortedOrder"]:
                print(" -> \"{}\" already in Clover plist!".format(cl_acpi))
            else:
                cl_plist["ACPI"]["SortedOrder"].append(cl_acpi)
        # Iterate the patches
        for p in patches:
            ocp = self.get_oc_patch(p)
            cp = self.get_clover_patch(p)
            if replace: # Remove any conflicting entries
                oc_plist["ACPI"]["Patch"] = [x for x in oc_plist["ACPI"]["Patch"] if ocp["Find"] != x["Find"] and ocp["Replace"] != x["Replace"]]
                cl_plist["ACPI"]["DSDT"]["Patches"] = [x for x in cl_plist["ACPI"]["DSDT"]["Patches"] if cp["Find"] != x["Find"] and cp["Replace"] != x["Replace"]]
            if any(x["Find"] == ocp["Find"] and x["Replace"] == ocp["Replace"] for x in oc_plist["ACPI"]["Patch"]):
                print(" -> Patch \"{}\" already in OC plist!".format(p["Comment"]))
            else:
                print(" -> Adding Patch \"{}\" to OC plist!".format(p["Comment"]))
                oc_plist["ACPI"]["Patch"].append(ocp)
            if any(x["Find"] == cp["Find"] and x["Replace"] == cp["Replace"] for x in cl_plist["ACPI"]["DSDT"]["Patches"]):
                print(" -> Patch \"{}\" already in Clover plist!".format(p["Comment"]))
            else:
                print(" -> Adding Patch \"{}\" to Clover plist!".format(p["Comment"]))
                cl_plist["ACPI"]["DSDT"]["Patches"].append(cp)
        # Iterate any dropped tables
        for d in drops:
            ocd = self.get_oc_drop(d)
            cd = self.get_clover_drop(d)
            if replace:
                oc_plist["ACPI"]["Delete"] = [x for x in oc_plist["ACPI"]["Delete"] if ocd["TableSignature"] != x["TableSignature"] and ocd["OemTableId"] != x["OemTableId"]]
                cl_plist["ACPI"]["DropTables"] = [x for x in cl_plist["ACPI"]["DropTables"] if cd.get("Signature") != x.get("Signature") and cd.get("TableId") != x.get("TableId")]
            if any(x["TableSignature"] == ocd["TableSignature"] and x["OemTableId"] == ocd["OemTableId"] for x in oc_plist["ACPI"]["Delete"]):
                print(" -> \"{}\" already in OC plist!".format(d["Comment"]))
            else:
                print(" -> Adding \"{}\" to OC plist!".format(d["Comment"]))
                oc_plist["ACPI"]["Delete"].append(ocd)
            name_parts = []
            for x in ("Signature","TableId"):
                if not cd.get(x): continue
                n = cd[x]
                if 2/3!=0 and not isinstance(n,str):
                    try: n = n.decode()
                    except: continue
                name_parts.append(n.replace("?"," ").strip())
            name = " - ".join(name_parts)
            if any(x.get("Signature") == cd.get("Signature") and x.get("TableId") == cd.get("TableId") for x in cl_plist["ACPI"]["DropTables"]):
                print(" -> \"{}\" already in Clover plist!".format(name or "Unknown Dropped Table"))
            else:
                cl_plist["ACPI"]["DropTables"].append(cd)
                print(" -> Adding \"{}\" to Clover plist!".format(name or "Unknown Dropped Table"))
        # Write the plists if we have something to write
        if oc_plist:
            with open(os.path.join(output,"patches_OC.plist"),"wb") as f:
                plist.dump(oc_plist,f)
        if cl_plist:
            with open(os.path.join(output,"patches_Clover.plist"),"wb") as f:
                plist.dump(cl_plist,f)
    def patch_warn(self):
        # Warn users to ensure they merge the patches_XX.plist contents with their config.plist
        print("\n{}!! WARNING !!{} Make sure you merge the contents of patches_[OC/Clover].plist".format(self.red,self.rst))
        print(" with your config.plist!\n")
    def get_lpc_name(self,log=True,skip_ec=False,skip_common_names=False):
        # Intel devices appear to use _ADR, 0x001F0000
        # AMD devices appear to use _ADR, 0x00140003
        if log: print("Locating LPC(B)/SBRG...")
        for table_name in self.sorted_nicely(list(self.d.acpi_tables)):
            table = self.d.acpi_tables[table_name]
            # The LPCB device will always be the parent of the PNP0C09 device
            # if found
            if not skip_ec:
                ec_list = self.d.get_device_paths_with_hid("PNP0C09",table=table)
                if len(ec_list):
                    lpc_name = ".".join(ec_list[0][0].split(".")[:-1])
                    if log: print(" - Found {} in {}".format(lpc_name,table_name))
                    return lpc_name
            # Maybe try common names if we haven't found it yet
            if not skip_common_names:
                for x in ("LPCB", "LPC0", "LPC", "SBRG", "PX40"):
                    try:
                        lpc_name = self.d.get_device_paths(x,table=table)[0][0]
                        if log: print(" - Found {} in {}".format(lpc_name,table_name))
                        return lpc_name
                    except: pass
            # Finally check by address - some Intel tables have devices at
            # 0x00140003
            paths = self.d.get_path_of_type(obj_type="Name",obj="_ADR",table=table)
            for path in paths:
                adr = self.get_address_from_line(path[1],table=table)
                if adr in (0x001F0000, 0x00140003):
                    # Get the path minus ._ADR
                    lpc_name = path[0][:-5]
                    # Make sure the LPCB device does not have an _HID
                    lpc_hid = lpc_name+"._HID"
                    if any(x[0]==lpc_hid for x in table.get("paths",[])):
                        continue
                    if log: print(" - Found {} in {}".format(lpc_name,table_name))
                    return lpc_name
        if log:
            print(" - Could not locate LPC(B)! Aborting!")
            print("")
        return None # Didn't find it
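    # On success this returns a fully qualified device path - e.g. something like
    # "\_SB.PCI0.LPCB" on many Intel boards, or an SBRG path on some AMD boards.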
    def fake_ec(self, laptop = False):
        if not self.ensure_dsdt():
            return
        self.u.head("Fake EC")
        print("")
        print("Locating PNP0C09 (EC) devices...")
        # Set up a helper method to determine
        # if an _STA needs patching based on
        # the type and returns.
        def sta_needs_patching(sta):
            if not isinstance(sta,dict) or not sta.get("sta"):
                return False
            # Check if we have an IntObj or MethodObj
            # _STA, and scrape for values if possible.
            if sta.get("sta_type") == "IntObj":
                # We got an int - see if it's force-enabled
                try:
                    sta_scope = table["lines"][sta["sta"][0][1]]
                    if not "Name (_STA, 0x0F)" in sta_scope:
                        return True
                except Exception as e:
                    print(e)
                    return True
            elif sta.get("sta_type") == "MethodObj":
                # We got a method - if we have more than one
                # "Return (", or not a single "Return (0x0F)",
                # then we need to patch this out and replace
                try:
                    sta_scope = "\n".join(self.d.get_scope(sta["sta"][0][1],strip_comments=True,table=table))
                    if sta_scope.count("Return (") > 1 or not "Return (0x0F)" in sta_scope:
                        # More than one return, or our return isn't force-enabled
                        return True
                except Exception as e:
                    return True
            # If we got here - it's not a recognized type, or
            # it was fully qualified and doesn't need patching
            return False
        rename = False
        named_ec = False
        ec_to_patch = []
        ec_to_enable = []
        ec_sta = {}
        ec_enable_sta = {}
        patches = []
        lpc_name = None
        ec_located = False
        for table_name in self.sorted_nicely(list(self.d.acpi_tables)):
            table = self.d.acpi_tables[table_name]
            ec_list = self.d.get_device_paths_with_hid("PNP0C09",table=table)
            if len(ec_list):
                lpc_name = ".".join(ec_list[0][0].split(".")[:-1])
                print(" - Got {:,} in {}".format(len(ec_list),table_name))
                print(" - Validating...")
                for x in ec_list:
                    device = orig_device = x[0]
                    print(" --> {}".format(device))
                    if device.split(".")[-1] == "EC":
                        named_ec = True
                        if not laptop:
                            # Only rename if we're trying to replace it
                            print(" ----> PNP0C09 (EC) called EC. Renaming")
                            device = ".".join(device.split(".")[:-1]+["EC0"])
                            rename = True
                    scope = "\n".join(self.d.get_scope(x[1],strip_comments=True,table=table))
                    # We need to check for _HID, _CRS, and _GPE
                    if all(y in scope for y in ["_HID","_CRS","_GPE"]):
                        print(" ----> Valid PNP0C09 (EC) Device")
                        ec_located = True
                        sta = self.get_sta_var(
                            var=None,
                            device=orig_device,
                            dev_hid="PNP0C09",
                            dev_name=orig_device.split(".")[-1],
                            log_locate=False,
                            table=table
                        )
                        if not laptop:
                            ec_to_patch.append(device)
                            # Only unconditionally override _STA methods
                            # if not building for a laptop
                            if sta.get("patches"):
                                patches.extend(sta.get("patches",[]))
                                ec_sta[device] = sta
                        elif sta.get("patches"):
                            if sta_needs_patching(sta):
                                # Retain the info as we need to override it
                                ec_to_enable.append(device)
                                ec_enable_sta[device] = sta
                                # Disable the patches by default and add to the list
                                for patch in sta.get("patches",[]):
                                    patch["Enabled"] = False
                                    patch["Disabled"] = True
                                    patches.append(patch)
                            else:
                                print(" --> _STA properly enabled - skipping rename")
                    else:
                        print(" ----> NOT Valid PNP0C09 (EC) Device")
        if not ec_located:
            print(" - No valid PNP0C09 (EC) devices found - only needs a Fake EC device")
        if laptop and named_ec and not patches:
            print(" ----> Named EC device located - no fake needed.")
            print("")
            self.u.grab("Press [enter] to return to main menu...")
            return
        if lpc_name is None:
            lpc_name = self.get_lpc_name(skip_ec=True,skip_common_names=True)
        if lpc_name is None:
            self.u.grab("Press [enter] to return to main menu...")
            return
        comment = "Faked Embedded Controller"
        if rename == True:
            patches.insert(0,{
                "Comment":"EC to EC0{}".format("" if not ec_sta else " - must come before any EC _STA to XSTA renames!"),
                "Find":"45435f5f",
                "Replace":"4543305f"
            })
            comment += " - Needs EC to EC0 {}".format(
                "and EC _STA to XSTA renames" if ec_sta else "rename"
            )
        elif ec_sta:
            comment += " - Needs EC _STA to XSTA renames"
        oc = {"Comment":comment,"Enabled":True,"Path":"SSDT-EC.aml"}
        self.make_plist(oc, "SSDT-EC.aml", patches, replace=True)
        print("Creating SSDT-EC...")
        ssdt = """
DefinitionBlock ("", "SSDT", 2, "CORP ", "SsdtEC", 0x00001000)
{
    External ([[LPCName]], DeviceObj)
""".replace("[[LPCName]]",lpc_name)
        for x in ec_to_patch:
            ssdt += "    External ({}, DeviceObj)\n".format(x)
            if x in ec_sta:
                ssdt += "    External ({}.XSTA, {})\n".format(x,ec_sta[x].get("sta_type","MethodObj"))
        # Walk the ECs to enable
        for x in ec_to_enable:
            ssdt += "    External ({}, DeviceObj)\n".format(x)
            if x in ec_enable_sta:
                # Add the _STA and XSTA refs as the patch may not be enabled
                ssdt += "    External ({0}._STA, {1})\n    External ({0}.XSTA, {1})\n".format(x,ec_enable_sta[x].get("sta_type","MethodObj"))
        # Walk them again and add the _STAs
        for x in ec_to_patch:
            ssdt += """
    Scope ([[ECName]])
    {
        Method (_STA, 0, NotSerialized) // _STA: Status
        {
            If (_OSI ("Darwin"))
            {
                Return (0)
            }
            Else
            {
                Return ([[XSTA]])
            }
        }
    }
""".replace("[[LPCName]]",lpc_name).replace("[[ECName]]",x) \
   .replace("[[XSTA]]","{}.XSTA{}".format(x," ()" if ec_sta[x].get("sta_type","MethodObj")=="MethodObj" else "") if x in ec_sta else "0x0F")
        # Walk them yet again - and force enable as needed
        for x in ec_to_enable:
            ssdt += """
    If (LAnd (CondRefOf ([[ECName]].XSTA), LNot (CondRefOf ([[ECName]]._STA))))
    {
        Scope ([[ECName]])
        {
            Method (_STA, 0, NotSerialized) // _STA: Status
            {
                If (_OSI ("Darwin"))
                {
                    Return (0x0F)
                }
                Else
                {
                    Return ([[XSTA]])
                }
            }
        }
    }
""".replace("[[LPCName]]",lpc_name).replace("[[ECName]]",x) \
   .replace("[[XSTA]]","{}.XSTA{}".format(x," ()" if ec_enable_sta[x].get("sta_type","MethodObj")=="MethodObj" else "") if x in ec_enable_sta else "Zero")
        # Create the faked EC
        if not laptop or not named_ec:
            ssdt += """
    Scope ([[LPCName]])
    {
        Device (EC)
        {
            Name (_HID, "ACID0001") // _HID: Hardware ID
            Method (_STA, 0, NotSerialized) // _STA: Status
            {
                If (_OSI ("Darwin"))
                {
                    Return (0x0F)
                }
                Else
                {
                    Return (Zero)
                }
            }
        }
    }""".replace("[[LPCName]]",lpc_name)
        # Close the SSDT scope
        ssdt += """
}"""
        self.write_ssdt("SSDT-EC",ssdt)
        print("")
        print("Done.")
        self.patch_warn()
        self.u.grab("Press [enter] to return...")
    def plugin_type(self):
        if not self.ensure_dsdt(allow_any=True):
            return
        self.u.head("Plugin Type")
        print("")
        print("Determining CPU name scheme...")
        for table_name in self.sorted_nicely(list(self.d.acpi_tables)):
            ssdt_name = "SSDT-PLUG"
            table = self.d.acpi_tables[table_name]
            if not table.get("signature") in (b"DSDT",b"SSDT"):
                continue # We're not checking data tables
            print(" Checking {}...".format(table_name))
            try: cpu_name = self.d.get_processor_paths(table=table)[0][0]
            except: cpu_name = None
            if cpu_name:
                print(" - Found Processor: {}".format(cpu_name))
                oc = {"Comment":"Sets plugin-type to 1 on first Processor object","Enabled":True,"Path":ssdt_name+".aml"}
                print("Creating SSDT-PLUG...")
                ssdt = """//
// Based on the sample found at https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/SSDT-PLUG.dsl
//
DefinitionBlock ("", "SSDT", 2, "CORP", "CpuPlug", 0x00003000)
{
    External ([[CPUName]], ProcessorObj)
    Scope ([[CPUName]])
    {
        If (_OSI ("Darwin")) {
            Method (_DSM, 4, NotSerialized) // _DSM: Device-Specific Method
            {
                If (LNot (Arg2))
                {
                    Return (Buffer (One)
                    {
                        0x03
                    })
                }
                Return (Package (0x02)
                {
                    "plugin-type",
                    One
                })
            }
        }
    }
}""".replace("[[CPUName]]",cpu_name)
            else:
                ssdt_name += "-ALT"
                print(" - No Processor objects found...")
                procs = self.d.get_device_paths_with_hid(hid="ACPI0007",table=table)
                if not procs:
                    print(" - No ACPI0007 devices found...")
                    continue
                print(" - Located {:,} ACPI0007 device{}".format(
                    len(procs), "" if len(procs)==1 else "s"
                ))
                parent = procs[0][0].split(".")[0]
                print(" - Got parent at {}, iterating...".format(parent))
                proc_list = []
                for proc in procs:
                    print(" - Checking {}...".format(proc[0].split(".")[-1]))
                    uid = self.d.get_path_of_type(obj_type="Name",obj=proc[0]+"._UID",table=table)
                    if not uid:
                        print(" --> Not found! Skipping...")
                        continue
                    # Let's get the actual _UID value
                    try:
                        _uid = table["lines"][uid[0][1]].split("_UID, ")[1].split(")")[0]
                        print(" --> _UID: {}".format(_uid))
                        proc_list.append((proc[0],_uid))
                    except:
                        print(" --> Not found! Skipping...")
                if not proc_list:
                    continue
                print("Iterating {:,} valid processor device{}...".format(len(proc_list),"" if len(proc_list)==1 else "s"))
                ssdt = """//
// Based on the sample found at https://github.com/acidanthera/OpenCorePkg/blob/master/Docs/AcpiSamples/Source/SSDT-PLUG-ALT.dsl
//
DefinitionBlock ("", "SSDT", 2, "CORP", "CpuPlugA", 0x00003000)
{
    External ([[parent]], DeviceObj)
    Scope ([[parent]])
    {""".replace("[[parent]]",parent)
                # Ensure our name scheme won't conflict
                schemes = ("C000","CP00","P000","PR00","CX00","PX00")
                # Walk the processor objects, and add them to the SSDT
                for i,proc_uid in enumerate(proc_list):
                    proc,uid = proc_uid
                    adr = hex(i)[2:].upper()
                    name = None
                    for s in schemes:
                        name_check = s[:-len(adr)]+adr
                        check_path = "{}.{}".format(parent,name_check)
                        if self.d.get_path_of_type(obj_type="Device",obj=check_path,table=table):
                            continue # Already defined - skip
                        # If we got here - we found an unused name
                        name = name_check
                        break
                    if not name:
                        print(" - Could not find an available name scheme! Aborting.")
                        print("")
                        self.u.grab("Press [enter] to return to main menu...")
                        return
                    ssdt+="""
        Processor ([[name]], [[uid]], 0x00000510, 0x06)
        {
            // [[proc]]
            Name (_HID, "ACPI0007" /* Processor Device */) // _HID: Hardware ID
            Name (_UID, [[uid]])
            Method (_STA, 0, NotSerialized) // _STA: Status
            {
                If (_OSI ("Darwin"))
                {
                    Return (0x0F)
                }
                Else
                {
                    Return (Zero)
                }
            }""".replace("[[name]]",name).replace("[[uid]]",uid).replace("[[proc]]",proc)
                    if i == 0: # Got the first, add plugin-type as well
                        ssdt += """
            Method (_DSM, 4, NotSerialized)
            {
                If (LNot (Arg2)) {
                    Return (Buffer (One) { 0x03 })
                }
                Return (Package (0x02)
                {
                    "plugin-type",
                    One
                })
            }"""
                    # Close up the SSDT
                    ssdt += """
        }"""
                ssdt += """
    }
}"""
                oc = {"Comment":"Redefines modern CPU Devices as legacy Processor objects and sets plugin-type to 1 on the first","Enabled":True,"Path":ssdt_name+".aml"}
            self.make_plist(oc, ssdt_name+".aml", ())
            self.write_ssdt(ssdt_name,ssdt)
            print("")
            print("Done.")
            self.patch_warn()
            self.u.grab("Press [enter] to return...")
            return
        # If we got here - we reached the end
        print("No valid processor devices found!")
        print("")
        self.u.grab("Press [enter] to return...")
        return
    def list_irqs(self):
        # Walks the DSDT keeping track of the current device and
        # saving the IRQNoFlags if found
        devices = {}
        current_device = None
        current_hid = None
        irq = False
        last_irq = False
        irq_index = 0
        for index,line in enumerate(self.d.get_dsdt_or_only()["lines"]):
            if self.d.is_hex(line):
                # Skip all hex lines
                continue
            if irq:
                # Get the values
                num = line.split("{")[1].split("}")[0].replace(" ","")
                num = "#" if not len(num) else num
                if current_device in devices:
                    if last_irq: # In a row
                        devices[current_device]["irq"] += ":"+num
                    else: # Skipped at least one line
                        irq_index = self.d.find_next_hex(index)[1]
                        devices[current_device]["irq"] += "-"+str(irq_index)+"|"+num
                else:
                    irq_index = self.d.find_next_hex(index)[1]
                    devices[current_device] = {"irq":str(irq_index)+"|"+num}
                irq = False
                last_irq = True
            elif "Device (" in line:
                # Check if we retain the _HID here
                if current_device and current_device in devices and current_hid:
                    # Save it
                    devices[current_device]["hid"] = current_hid
                last_irq = False
                current_hid = None
                try: current_device = line.split("(")[1].split(")")[0]
                except:
                    current_device = None
                    continue
            elif "_HID, " in line and current_device:
                try: current_hid = line.split('"')[1]
                except: pass
            elif "IRQNoFlags" in line and current_device:
                # Next line has our interrupts
                irq = True
            # Check if just a filler line
            elif len(line.replace("{","").replace("}","").replace("(","").replace(")","").replace(" ","").split("//")[0]):
                # Reset last IRQ as it's not in a row
                last_irq = False
        # Retain the final _HID if needed
        if current_device and current_device in devices and current_hid:
            devices[current_device]["hid"] = current_hid
        return devices
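    # Returned shape (hypothetical): {"TMR":{"irq":"1234|0","hid":"PNP0100"},"RTC":{"irq":"1267|8"}}
    # where each "irq" value is "<next hex line index>|<IRQs>", ":"-joined for back-to-back
    # IRQNoFlags entries and "-"-separated when another index is recorded.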
    def get_hex_from_irqs(self, irq, rem_irq = None):
        # We need to search for a few different types:
        #
        # 22 XX XX 22 XX XX 22 XX XX (multiples on different lines)
        # 22 XX XX (summed multiples in the same bracket - {0,8,11})
        # 22 XX XX (single IRQNoFlags entry)
        #
        # Can end with 79 [00] (end of method), 86 09 (middle of method) or 47 01 (unknown)
        lines = []
        remd = []
        for a in irq.split("-"):
            index,i = a.split("|") # Get the index
            index = int(index)
            find = self.get_int_for_line(i)
            repl = [0]*len(find)
            # Now we need to verify if we're patching *all* IRQs, or just some specifics
            if rem_irq:
                repl = [x for x in find]
                matched = []
                for x in rem_irq:
                    # Get the int
                    rem = self.convert_irq_to_int(x)
                    repl1 = [y&(rem^0xFFFF) if y >= rem else y for y in repl]
                    if repl1 != repl:
                        # Changes were made
                        remd.append(x)
                        repl = [y for y in repl1]
            # Get the hex
            d = {
                "irq":i,
                "find": "".join(["22"+self.d.get_hex_from_int(x) for x in find]),
                "repl": "".join(["22"+self.d.get_hex_from_int(x) for x in repl]),
                "remd": remd,
                "index": index
            }
            d["changed"] = not (d["find"]==d["repl"])
            lines.append(d)
        return lines
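    # Each returned entry describes one IRQNoFlags run: the source IRQ string, matching
    # "find"/"repl" hex (one 22 XX XX descriptor per value), the removed IRQs, the line
    # index, and a "changed" flag marking whether the replacement differs from the original.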
    def get_int_for_line(self, irq):
        irq_list = []
        for i in irq.split(":"):
            irq_list.append(self.same_line_irq(i))
        return irq_list
    def convert_irq_to_int(self, irq):
        b = "0"*(16-irq)+"1"+"0"*(irq)
        return int(b,2)
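    # e.g. convert_irq_to_int(8) -> 0x100 (bit 8 set), matching the 16-bit IRQ mask
    # layout of an IRQNoFlags (22 XX XX) resource descriptor.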
    def same_line_irq(self, irq):
        # We sum the IRQ values and return the int
        total = 0
        for i in irq.split(","):
            if i == "#":
                continue # Null value
            try: i=int(i)
            except: continue # Not an int
            if i > 15 or i < 0:
                continue # Out of range